Compare commits


1 Commit

Author: terence tsao
SHA1: b4737e4c0e
Message: log data times
Date: 2025-11-11 12:00:17 -08:00
902 changed files with 4434 additions and 254978 deletions

View File

@@ -197,25 +197,6 @@ nogo(
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/modernize/any:go_default_library",
"//tools/analyzers/modernize/appendclipped:go_default_library",
"//tools/analyzers/modernize/bloop:go_default_library",
"//tools/analyzers/modernize/fmtappendf:go_default_library",
"//tools/analyzers/modernize/forvar:go_default_library",
"//tools/analyzers/modernize/mapsloop:go_default_library",
"//tools/analyzers/modernize/minmax:go_default_library",
#"//tools/analyzers/modernize/newexpr:go_default_library", # Disabled until go 1.26.
"//tools/analyzers/modernize/omitzero:go_default_library",
"//tools/analyzers/modernize/rangeint:go_default_library",
"//tools/analyzers/modernize/reflecttypefor:go_default_library",
"//tools/analyzers/modernize/slicescontains:go_default_library",
#"//tools/analyzers/modernize/slicesdelete:go_default_library", # Disabled, see https://go.dev/issue/73686
"//tools/analyzers/modernize/slicessort:go_default_library",
"//tools/analyzers/modernize/stringsbuilder:go_default_library",
"//tools/analyzers/modernize/stringscutprefix:go_default_library",
"//tools/analyzers/modernize/stringsseq:go_default_library",
"//tools/analyzers/modernize/testingcontext:go_default_library",
"//tools/analyzers/modernize/waitgroup:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/nopanic:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",

View File

@@ -4,87 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [v7.0.0](https://github.com/prysmaticlabs/prysm/compare/v6.1.4...v7.0.0) - 2025-11-10
This is our initial mainnet release for the Ethereum mainnet Fulu fork on December 3rd, 2025. All operators MUST update to v7.0.0 or a later release prior to the Fulu fork epoch `411392`. See the [Ethereum Foundation blog post](https://blog.ethereum.org/2025/11/06/fusaka-mainnet-announcement) for more information on Fulu.
Other than the mainnet Fulu fork schedule, there are a few callouts in this release:
- `by-epoch` blob storage format is the default for new installations. Users that haven't migrated will see a warning to migrate to the new format. Existing deployments may set `--blob-storage-layout=by-epoch` to perform the migration.
- Several deprecated flags have been deleted! Please review the "removed" section of this changelog carefully. If you are referencing a removed flag, Prysm will not start! All of these flags had no effect for at least one release.
- Several deprecated API endpoints have been deleted. Please review the "removed" section of this changelog carefully.
- Backfill is not supported in Fulu. This is expected to be fixed in the next release and should be delivered prior to the mainnet activation fork.
- The builder default gas limit is raised from `45000000` (45 MGas) to `60000000` (60 MGas).
- Several bug fixes and improvements.
### Added
- Allow custom headers in validator client HTTP requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15884)
- Metric to track data columns recovered from execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15924)
- Metrics: Add count of peers per direction and type (inbound/outbound), (TCP/QUIC). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
- `p2p_subscribed_topic_peer_total`: Reset to avoid dangling values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
- Add `p2p_minimum_peers_per_subnet` metric. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
- Added GeneralizedIndicesFromPath function to calculate the GIs for a given sszInfo object and a PathElement. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15873)
- Add Gloas protobuf definitions with spec tests and SSZ serialization support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15601)
- Fulu fork epoch for mainnet configurations set for December 3, 2025, 09:49:11pm UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
- Added BPO schedules for December 9, 2025, 02:21:11pm UTC and January 7, 2026, 01:01:11am UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
### Changed
- Updated consensus spec tests to v1.6.0-beta.1 with new hashes and URL template. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15918)
- Use the `by-epoch` blob storage layout by default and log a warning to users who continue to use the flat layout, encouraging them to switch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15904)
- Update go-netroute to `v0.3.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15934)
- Introduced Path type for SSZ-QL queries and updated PathElement (removed Length field, kept Index) enforcing that len queries are terminal (at most one per path). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
- Changed length query syntax from `block.payload.len(transactions)` to `len(block.payload.transactions)`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
- Update `go-netroute` to `v0.4.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15949)
- Updated consensus spec tests to v1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15960)
- Updated go bitfield from prysmaticlabs to offchainlabs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15968)
- Bump builder default gas limit from `45000000` (45 MGas) to `60000000` (60 MGas). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15979)
- Use head state for block pubsub validation when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15972)
- Updated consensus spec to v1.6.0 from v1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
- Upgrade Prysm v6 to v7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15989)
- Use head state readonly when possible to validate data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15977)
### Removed
- Log mentioning removed flag `--show-deposit-data`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15926)
- Remove Beacon API endpoints that were deprecated in Electra: `GET /eth/v1/beacon/deposit_snapshot`, `GET /eth/v1/beacon/blocks/{block_id}/attestations`, `GET /eth/v1/beacon/pool/attestations`, `POST /eth/v1/beacon/pool/attestations`, `GET /eth/v1/beacon/pool/attester_slashings`, `POST /eth/v1/beacon/pool/attester_slashings`, `GET /eth/v1/validator/aggregate_attestation`, `POST /eth/v1/validator/aggregate_and_proofs`, `POST /eth/v1/beacon/blocks`, `POST /eth/v1/beacon/blinded_blocks`, `GET /eth/v1/builder/states/{state_id}/expected_withdrawals`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15962)
- Deprecated flag `--enable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-build-block-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-reorg-late-blocks` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-aggregate-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-verbose-sig-verification` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-debug-rpc-endpoints` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--beacon-rpc-gateway-provider` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-grpc-gateway` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-committee-aware-packing` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--interop-genesis-time` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--interop-num-validators` has been removed (from beacon-chain only; still available in validator client). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-quic` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--attest-timely` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--p2p-metadata` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
### Fixed
- Remove `Reading static P2P private key from a file.` log if Fulu is enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15913)
- `blobSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
- `dataColumnSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
- Fix incorrect version used when sending attestation version in Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15950)
- Changed the behavior of topic subscriptions such that only topics that require the active validator count will compute that value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
- Added a Mutex to the computation of active validator count during topic subscription to avoid a race condition where multiple goroutines are computing the same work. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
- `RODataColumnsVerifier.ValidProposerSignature`: Ensure the expensive signature verification is only performed once for concurrent requests for the same signature data. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15954)
- Use `filepath` for path operations (clean, join, etc.) to ensure correct behavior on Windows. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15953)
- Fix #15969: Handle addition overflow in `/eth/v1/beacon/rewards/attestations/{epoch}`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15970)
- `SidecarProposerExpected`: Add the slot in the single flight key. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15976)
- Ensure the rate limit is respected for by-root blob and data column sidecar requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15981)
- Use head only if it's compatible with the target for attestation validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15965)
- Backfill is disabled if the checkpoint sync origin is after the Fulu fork, due to lack of DataColumnSidecar support in backfill. To track the availability of Fulu-compatible backfill, please watch https://github.com/OffchainLabs/prysm/issues/15982. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15987)
- `SidecarProposerExpected`: Use the correct value of proposer index in the singleflight group. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15993)
## [v6.1.4](https://github.com/prysmaticlabs/prysm/compare/v6.1.3...v6.1.4) - 2025-10-24
This release includes a bug fix affecting block proposals in rare cases, along with an important update for Windows users running post-Fusaka fork.
@@ -3901,4 +3820,4 @@ There are no security updates in this release.
# Older than v2.0.0
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases

View File

@@ -205,26 +205,6 @@ prysm_image_deps()
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
# Override golang.org/x/tools to use v0.38.0 instead of v0.30.0
# This is necessary as this dependency is required by rules_go and they do not accept dependency
# update PRs. Instead, they ask downstream projects to override the dependency. To generate the
# patches or update this dependency again, check out the rules_go repo then run the releaser tool.
# bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools
# Copy the patches and http_archive updates from rules_go here.
http_archive(
name = "org_golang_x_tools",
patch_args = ["-p1"],
patches = [
"//third_party:org_golang_x_tools-deletegopls.patch",
"//third_party:org_golang_x_tools-gazelle.patch",
],
sha256 = "8509908cd7fc35aa09ff49d8494e4fd25bab9e6239fbf57e0d8344f6bec5802b",
strip_prefix = "tools-0.38.0",
urls = [
"https://github.com/golang/tools/archive/refs/tags/v0.38.0.zip",
],
)
go_rules_dependencies()
go_register_toolchains(

View File

@@ -56,7 +56,7 @@ func ParseAccept(header string) []mediaRange {
}
var out []mediaRange
for field := range strings.SplitSeq(header, ",") {
for _, field := range strings.Split(header, ",") {
if r, ok := parseMediaRange(field); ok {
out = append(out, r)
}
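For context on the hunk above: the two loop forms are equivalent. Ranging over `strings.SplitSeq` (Go 1.24+) yields the same fields as ranging over the slice from `strings.Split`; the iterator form simply avoids allocating the intermediate slice. A minimal standalone sketch (illustrative, not Prysm code):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	header := "text/html, application/json;q=0.9, */*;q=0.8"

	// Iterator form (Go 1.24+): yields each field without building a slice.
	for field := range strings.SplitSeq(header, ",") {
		fmt.Println(strings.TrimSpace(field))
	}

	// Slice form: allocates a []string, then ranges over it.
	for _, field := range strings.Split(header, ",") {
		fmt.Println(strings.TrimSpace(field))
	}
}
```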

View File

@@ -421,7 +421,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
vs := make([]*structs.SignedValidatorRegistration, len(svr))
for i := range svr {
for i := 0; i < len(svr); i++ {
vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
}
body, err := json.Marshal(vs)

View File

@@ -121,7 +121,7 @@ func (s *Uint64String) UnmarshalText(t []byte) error {
// MarshalText returns a byte representation of the text from Uint64String.
func (s Uint64String) MarshalText() ([]byte, error) {
return fmt.Appendf(nil, "%d", s), nil
return []byte(fmt.Sprintf("%d", s)), nil
}
// VersionResponse is a JSON representation of a field in the builder API header response.
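For context on the `MarshalText` hunk above: `fmt.Appendf` (Go 1.19+) and `[]byte(fmt.Sprintf(...))` produce the same bytes; the former just skips the intermediate string allocation. A small standalone sketch (illustrative, not repository code):

```go
package main

import "fmt"

func main() {
	var s uint64 = 42

	// fmt.Appendf formats directly into a byte slice.
	a := fmt.Appendf(nil, "%d", s)

	// The classic form builds a string first, then converts it.
	b := []byte(fmt.Sprintf("%d", s))

	fmt.Println(string(a) == string(b)) // true
}
```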

View File

@@ -15,7 +15,7 @@ import (
func LogRequests(
ctx context.Context,
method string, req,
reply any,
reply interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,

View File

@@ -14,5 +14,5 @@ type GetForkScheduleResponse struct {
}
type GetSpecResponse struct {
Data any `json:"data"`
Data interface{} `json:"data"`
}
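Several hunks in this commit swap `any` and `interface{}`. Since Go 1.18, `any` is a predeclared alias for `interface{}`, so the two spellings are interchangeable everywhere and the change has no behavioral effect. A minimal illustration (not repository code):

```go
package main

import "fmt"

// Since Go 1.18, `any` is declared as: type any = interface{}.
// The two parameter types below are therefore identical.
func describeAny(v any)           { fmt.Printf("%T\n", v) }
func describeIface(v interface{}) { fmt.Printf("%T\n", v) }

func main() {
	var x any = 7
	var y interface{} = x // assignable in both directions, no conversion needed
	describeAny(y)
	describeIface(x)
}
```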

View File

@@ -93,9 +93,9 @@ func TestToggleMultipleTimes(t *testing.T) {
v := New()
pre := !v.IsSet()
for i := range 100 {
for i := 0; i < 100; i++ {
v.SetTo(false)
for range i {
for j := 0; j < i; j++ {
pre = v.Toggle()
}
@@ -149,7 +149,7 @@ func TestRace(t *testing.T) {
// Writer
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.Set()
wg.Done()
}
@@ -157,7 +157,7 @@ func TestRace(t *testing.T) {
// Reader
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.IsSet()
wg.Done()
}
@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {
// Writer
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.UnSet()
wg.Done()
}
@@ -173,7 +173,7 @@ func TestRace(t *testing.T) {
// Reader And Writer
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.Toggle()
wg.Done()
}
@@ -198,8 +198,8 @@ func ExampleAtomicBool() {
func BenchmarkMutexRead(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.RLock()
_ = v
m.RUnlock()
@@ -208,16 +208,16 @@ func BenchmarkMutexRead(b *testing.B) {
func BenchmarkAtomicValueRead(b *testing.B) {
var v atomic.Value
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = v.Load() != nil
}
}
func BenchmarkAtomicBoolRead(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = v.IsSet()
}
}
@@ -227,8 +227,8 @@ func BenchmarkAtomicBoolRead(b *testing.B) {
func BenchmarkMutexWrite(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.RLock()
v = true
m.RUnlock()
@@ -239,16 +239,16 @@ func BenchmarkMutexWrite(b *testing.B) {
func BenchmarkAtomicValueWrite(b *testing.B) {
var v atomic.Value
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.Store(true)
}
}
func BenchmarkAtomicBoolWrite(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.Set()
}
}
@@ -258,8 +258,8 @@ func BenchmarkAtomicBoolWrite(b *testing.B) {
func BenchmarkMutexCAS(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Lock()
if !v {
v = true
@@ -270,8 +270,8 @@ func BenchmarkMutexCAS(b *testing.B) {
func BenchmarkAtomicBoolCAS(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.SetToIf(false, true)
}
}
@@ -281,8 +281,8 @@ func BenchmarkAtomicBoolCAS(b *testing.B) {
func BenchmarkMutexToggle(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Lock()
v = !v
m.Unlock()
@@ -291,8 +291,8 @@ func BenchmarkMutexToggle(b *testing.B) {
func BenchmarkAtomicBoolToggle(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.Toggle()
}
}
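The test and benchmark hunks above move between Go 1.22+ range-over-int loops plus the Go 1.24 `testing.B.Loop` helper and the classic counted forms. A small self-contained sketch of the equivalent pairs, meant for a `*_test.go` file (illustrative only, not part of the repository):

```go
package demo

import "testing"

// work exercises both integer-loop styles seen in the hunks above.
func work(n int) int {
	sum := 0
	for i := range n { // Go 1.22+: i runs from 0 to n-1
		sum += i
	}
	for i := 0; i < n; i++ { // equivalent classic form
		sum += i
	}
	return sum
}

// Go 1.24+ style: b.Loop manages the timer and the iteration count.
func BenchmarkLoop(b *testing.B) {
	for b.Loop() {
		_ = work(10)
	}
}

// Classic style: reset the timer, then iterate b.N times.
func BenchmarkClassic(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = work(10)
	}
}
```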

View File

@@ -21,7 +21,7 @@ const (
func init() {
input = make([][]byte, benchmarkElements)
for i := range benchmarkElements {
for i := 0; i < benchmarkElements; i++ {
input[i] = make([]byte, benchmarkElementSize)
_, err := rand.Read(input[i])
if err != nil {
@@ -35,7 +35,7 @@ func hash(input [][]byte) [][]byte {
output := make([][]byte, len(input))
for i := range input {
copy(output, input)
for range benchmarkHashRuns {
for j := 0; j < benchmarkHashRuns; j++ {
hash := sha256.Sum256(output[i])
output[i] = hash[:]
}
@@ -44,15 +44,15 @@ func hash(input [][]byte) [][]byte {
}
func BenchmarkHash(b *testing.B) {
for b.Loop() {
for i := 0; i < b.N; i++ {
hash(input)
}
}
func BenchmarkHashMP(b *testing.B) {
output := make([][]byte, len(input))
for b.Loop() {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
for i := 0; i < b.N; i++ {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
return hash(input[offset : offset+entries]), nil
})
require.NoError(b, err)

View File

@@ -7,7 +7,7 @@ import (
// Debounce events fired over a channel by a specified duration, ensuring no events
// are handled until a certain interval of time has passed.
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan any, handler func(any)) {
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan interface{}, handler func(interface{})) {
var timer *time.Timer
defer func() {
if timer != nil {

View File

@@ -14,7 +14,7 @@ import (
)
func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
@@ -26,7 +26,7 @@ func TestDebounce_NoEvents(t *testing.T) {
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event any) {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
@@ -38,7 +38,7 @@ func TestDebounce_NoEvents(t *testing.T) {
}
func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
@@ -62,7 +62,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event any) {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
@@ -74,14 +74,14 @@ func TestDebounce_CtxClosing(t *testing.T) {
}
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event any) {
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
for range 100 {
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
// We should expect 100 rapid fire changes to only have caused
@@ -92,14 +92,14 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
}
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event any) {
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
for range 100 {
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")

View File

@@ -93,7 +93,9 @@ func ExampleSubscriptionScope() {
// Run a subscriber in the background.
divsub := app.SubscribeResults('/', divs)
mulsub := app.SubscribeResults('*', muls)
wg.Go(func() {
wg.Add(1)
go func() {
defer wg.Done()
defer fmt.Println("subscriber exited")
defer divsub.Unsubscribe()
defer mulsub.Unsubscribe()
@@ -109,7 +111,7 @@ func ExampleSubscriptionScope() {
return
}
}
})
}()
// Interact with the app.
app.Calc('/', 22, 11)
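The example hunk above shows both `wg.Go(func() { ... })` and the explicit `wg.Add(1)` / `go func() { defer wg.Done(); ... }()` pattern. `sync.WaitGroup.Go` (Go 1.25+) is shorthand for exactly that pattern; a standalone sketch (not repository code):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Go 1.25+: WaitGroup.Go increments the counter, runs f in a new
	// goroutine, and calls Done when f returns.
	wg.Go(func() {
		fmt.Println("task A")
	})

	// Equivalent explicit form used elsewhere in the hunks above.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("task B")
	}()

	wg.Wait()
}
```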

View File

@@ -26,7 +26,7 @@ func ExampleNewSubscription() {
// Create a subscription that sends 10 integers on ch.
ch := make(chan int)
sub := event.NewSubscription(func(quit <-chan struct{}) error {
for i := range 10 {
for i := 0; i < 10; i++ {
select {
case ch <- i:
case <-quit:

View File

@@ -3,6 +3,6 @@ package event
// SubscriberSender is an abstract representation of an *event.Feed
// to use in describing types that accept or return an *event.Feed.
type SubscriberSender interface {
Subscribe(channel any) Subscription
Send(value any) (nsent int)
Subscribe(channel interface{}) Subscription
Send(value interface{}) (nsent int)
}

View File

@@ -30,7 +30,7 @@ var errInts = errors.New("error in subscribeInts")
func subscribeInts(max, fail int, c chan<- int) Subscription {
return NewSubscription(func(quit <-chan struct{}) error {
for i := range max {
for i := 0; i < max; i++ {
if i >= fail {
return errInts
}
@@ -50,7 +50,7 @@ func TestNewSubscriptionError(t *testing.T) {
channel := make(chan int)
sub := subscribeInts(10, 2, channel)
loop:
for want := range 10 {
for want := 0; want < 10; want++ {
select {
case got := <-channel:
require.Equal(t, want, got)

View File

@@ -107,13 +107,15 @@ func TestLockUnlock(_ *testing.T) {
func TestLockUnlock_CleansUnused(t *testing.T) {
var wg sync.WaitGroup
wg.Go(func() {
wg.Add(1)
go func() {
lock := NewMultilock("dog", "cat", "owl")
lock.Lock()
assert.Equal(t, 3, len(locks.list))
lock.Unlock()
})
wg.Done()
}()
wg.Wait()
// We expect that unlocking completely cleared the locks list
// given all 3 lock keys were unused at time of unlock.

View File

@@ -9,14 +9,14 @@ import (
// WorkerResults are the results of a scatter worker.
type WorkerResults struct {
Offset int
Extent any
Extent interface{}
}
// Scatter scatters a computation across multiple goroutines.
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (any, error)) ([]*WorkerResults, error) {
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {
return nil, errors.New("input length must be greater than 0")
}

View File

@@ -46,9 +46,9 @@ func TestDouble(t *testing.T) {
inValues[i] = i
}
outValues := make([]int, test.inValues)
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
extent := make([]int, entries)
for i := range entries {
for i := 0; i < entries; i++ {
extent[i] = inValues[offset+i] * 2
}
return extent, nil
@@ -72,8 +72,8 @@ func TestDouble(t *testing.T) {
func TestMutex(t *testing.T) {
totalRuns := 1048576
val := 0
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
for range entries {
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
for i := 0; i < entries; i++ {
mu.Lock()
val++
mu.Unlock()
@@ -90,8 +90,8 @@ func TestMutex(t *testing.T) {
func TestError(t *testing.T) {
totalRuns := 1024
val := 0
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
for range entries {
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
for i := 0; i < entries; i++ {
mu.Lock()
val++
if val == 1011 {

View File

@@ -23,7 +23,6 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"kzg_test.go",
"trusted_setup_test.go",
"validation_test.go",
],

View File

@@ -34,6 +34,12 @@ type Bytes48 = ckzg4844.Bytes48
// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32
// CellsAndProofs represents the Cells and Proofs corresponding to a single blob.
type CellsAndProofs struct {
Cells []Cell
Proofs []Proof
}
// BlobToKZGCommitment computes a KZG commitment from a given blob.
func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
var kzgBlob kzg4844.Blob
@@ -59,7 +65,7 @@ func ComputeCells(blob *Blob) ([]Cell, error) {
cells := make([]Cell, len(ckzgCells))
for i := range ckzgCells {
copy(cells[i][:], ckzgCells[i][:])
cells[i] = Cell(ckzgCells[i])
}
return cells, nil
@@ -72,35 +78,22 @@ func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
if err != nil {
return Proof{}, err
return [48]byte{}, err
}
var result Proof
copy(result[:], proof[:])
return result, nil
return Proof(proof), nil
}
// ComputeCellsAndKZGProofs computes the cells and cells KZG proofs from a given blob.
func ComputeCellsAndKZGProofs(blob *Blob) ([]Cell, []Proof, error) {
func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
var ckzgBlob ckzg4844.Blob
copy(ckzgBlob[:], blob[:])
ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(&ckzgBlob)
if err != nil {
return nil, nil, err
return CellsAndProofs{}, err
}
if len(ckzgCells) != len(ckzgProofs) {
return nil, nil, errors.New("mismatched cells and proofs length")
}
cells := make([]Cell, len(ckzgCells))
proofs := make([]Proof, len(ckzgProofs))
for i := range ckzgCells {
copy(cells[i][:], ckzgCells[i][:])
copy(proofs[i][:], ckzgProofs[i][:])
}
return cells, proofs, nil
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}
// VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cells indices, cells and proofs.
@@ -110,57 +103,44 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
copy(ckzgCells[i][:], cells[i][:])
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
// RecoverCells recovers the complete cells from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCells(cellIndices []uint64, partialCells []Cell) ([]Cell, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
for i := range partialCells {
copy(ckzgPartialCells[i][:], partialCells[i][:])
}
ckzgCells, err := ckzg4844.RecoverCells(cellIndices, ckzgPartialCells)
if err != nil {
return nil, errors.Wrap(err, "recover cells")
}
cells := make([]Cell, len(ckzgCells))
for i := range ckzgCells {
copy(cells[i][:], ckzgCells[i][:])
}
return cells, nil
}
// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) ([]Cell, []Proof, error) {
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
for i := range partialCells {
copy(ckzgPartialCells[i][:], partialCells[i][:])
ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
}
ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
if err != nil {
return nil, nil, errors.Wrap(err, "recover cells and KZG proofs")
return CellsAndProofs{}, errors.Wrap(err, "recover cells and KZG proofs")
}
if len(ckzgCells) != len(ckzgProofs) {
return nil, nil, errors.New("mismatched cells and proofs length")
}
cells := make([]Cell, len(ckzgCells))
proofs := make([]Proof, len(ckzgProofs))
for i := range ckzgCells {
copy(cells[i][:], ckzgCells[i][:])
copy(proofs[i][:], ckzgProofs[i][:])
}
return cells, proofs, nil
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}
// makeCellsAndProofs converts cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
if len(ckzgCells) != len(ckzgProofs) {
return CellsAndProofs{}, errors.New("different number of cells/proofs")
}
cells := make([]Cell, 0, len(ckzgCells))
proofs := make([]Proof, 0, len(ckzgProofs))
for i := range ckzgCells {
cells = append(cells, Cell(ckzgCells[i]))
proofs = append(proofs, Proof(ckzgProofs[i]))
}
return CellsAndProofs{
Cells: cells,
Proofs: proofs,
}, nil
}
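The KZG hunk above uses two equivalent idioms for moving data between the package's `Cell`/`Proof` types and the `ckzg4844` types: a direct conversion between identically-sized array types, and an element-wise `copy` through slices. A minimal sketch with hypothetical stand-in types (the array sizes here are illustrative, not the real cell or proof sizes):

```go
package main

import "fmt"

// Two distinct named array types with the same underlying layout, standing in
// for the package's Cell and ckzg4844.Cell.
type LocalCell [4]byte
type VendorCell [4]byte

func main() {
	v := VendorCell{1, 2, 3, 4}

	// Direct conversion between identically-shaped array types,
	// as in `cells[i] = Cell(ckzgCells[i])` above.
	a := LocalCell(v)

	// Element-wise copy through slices, as in `copy(cells[i][:], ckzgCells[i][:])`.
	var b LocalCell
	copy(b[:], v[:])

	fmt.Println(a == b) // true: both forms produce the same value
}
```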

View File

@@ -1,236 +0,0 @@
package kzg
import (
"testing"
"github.com/OffchainLabs/prysm/v7/crypto/random"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestComputeCells(t *testing.T) {
require.NoError(t, Start())
t.Run("valid blob", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cells, err := ComputeCells(&blob)
require.NoError(t, err)
require.Equal(t, 128, len(cells))
})
}
func TestComputeBlobKZGProof(t *testing.T) {
require.NoError(t, Start())
t.Run("valid blob and commitment", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
proof, err := ComputeBlobKZGProof(&blob, commitment)
require.NoError(t, err)
require.Equal(t, BytesPerProof, len(proof))
require.NotEqual(t, Proof{}, proof, "proof should not be empty")
})
}
func TestComputeCellsAndKZGProofs(t *testing.T) {
require.NoError(t, Start())
t.Run("valid blob returns matching cells and proofs", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
require.Equal(t, 128, len(cells))
require.Equal(t, 128, len(proofs))
require.Equal(t, len(cells), len(proofs), "cells and proofs should have matching lengths")
})
}
func TestVerifyCellKZGProofBatch(t *testing.T) {
require.NoError(t, Start())
t.Run("valid proof batch", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Verify a subset of cells
cellIndices := []uint64{0, 1, 2, 3, 4}
selectedCells := make([]Cell, len(cellIndices))
commitmentsBytes := make([]Bytes48, len(cellIndices))
proofsBytes := make([]Bytes48, len(cellIndices))
for i, idx := range cellIndices {
selectedCells[i] = cells[idx]
copy(commitmentsBytes[i][:], commitment[:])
copy(proofsBytes[i][:], proofs[idx][:])
}
valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, selectedCells, proofsBytes)
require.NoError(t, err)
require.Equal(t, true, valid)
})
t.Run("invalid proof should fail", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
cells, _, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Use invalid proofs
cellIndices := []uint64{0}
selectedCells := []Cell{cells[0]}
commitmentsBytes := make([]Bytes48, 1)
copy(commitmentsBytes[0][:], commitment[:])
// Create an invalid proof
invalidProof := Bytes48{}
proofsBytes := []Bytes48{invalidProof}
valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, selectedCells, proofsBytes)
require.NotNil(t, err)
require.Equal(t, false, valid)
})
}
func TestRecoverCells(t *testing.T) {
require.NoError(t, Start())
t.Run("recover from partial cells", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cells, err := ComputeCells(&blob)
require.NoError(t, err)
// Use half of the cells
partialIndices := make([]uint64, 64)
partialCells := make([]Cell, 64)
for i := range 64 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}
recoveredCells, err := RecoverCells(partialIndices, partialCells)
require.NoError(t, err)
require.Equal(t, 128, len(recoveredCells))
// Verify recovered cells match original
for i := range cells {
require.Equal(t, cells[i], recoveredCells[i])
}
})
t.Run("insufficient cells should fail", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cells, err := ComputeCells(&blob)
require.NoError(t, err)
// Use only 32 cells (less than 50% required)
partialIndices := make([]uint64, 32)
partialCells := make([]Cell, 32)
for i := range 32 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}
_, err = RecoverCells(partialIndices, partialCells)
require.NotNil(t, err)
})
}
func TestRecoverCellsAndKZGProofs(t *testing.T) {
require.NoError(t, Start())
t.Run("recover cells and proofs from partial cells", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Use half of the cells
partialIndices := make([]uint64, 64)
partialCells := make([]Cell, 64)
for i := range 64 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}
recoveredCells, recoveredProofs, err := RecoverCellsAndKZGProofs(partialIndices, partialCells)
require.NoError(t, err)
require.Equal(t, 128, len(recoveredCells))
require.Equal(t, 128, len(recoveredProofs))
require.Equal(t, len(recoveredCells), len(recoveredProofs), "recovered cells and proofs should have matching lengths")
// Verify recovered cells match original
for i := range cells {
require.Equal(t, cells[i], recoveredCells[i])
require.Equal(t, proofs[i], recoveredProofs[i])
}
})
t.Run("insufficient cells should fail", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cells, err := ComputeCells(&blob)
require.NoError(t, err)
// Use only 32 cells (less than 50% required)
partialIndices := make([]uint64, 32)
partialCells := make([]Cell, 32)
for i := range 32 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}
_, _, err = RecoverCellsAndKZGProofs(partialIndices, partialCells)
require.NotNil(t, err)
})
}
func TestBlobToKZGCommitment(t *testing.T) {
require.NoError(t, Start())
t.Run("valid blob", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
require.Equal(t, 48, len(commitment))
// Verify commitment is deterministic
commitment2, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
require.Equal(t, commitment, commitment2)
})
}

View File

@@ -70,7 +70,7 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) {
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)
for i := range blobCount {
for i := 0; i < blobCount; i++ {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
@@ -203,13 +203,13 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
require.NoError(t, err)
// Compute cells and proofs
_, proofs, err := ComputeCellsAndKZGProofs(&blob)
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Create flattened cell proofs (like execution client format)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = proofs[i][:]
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
@@ -236,7 +236,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
require.NoError(t, err)
// Compute cells and proofs
_, proofs, err := ComputeCellsAndKZGProofs(&blob)
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
@@ -244,7 +244,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
// Add cell proofs for this blob
for j := range numberOfColumns {
allCellProofs = append(allCellProofs, proofs[j][:])
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
}
}
@@ -319,7 +319,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
_, proofs, err := ComputeCellsAndKZGProofs(&blob)
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Generate wrong commitment from different blob
@@ -331,7 +331,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = proofs[i][:]
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
@@ -432,8 +432,8 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[1] = make([]byte, 32) // Wrong size
// Add cell proofs for both blobs
for range blobCount {
for range numberOfColumns {
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
@@ -450,7 +450,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := range blobCount {
for i := 0; i < blobCount; i++ {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
@@ -461,7 +461,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[i] = commitment[:]
// Add cell proofs - make some invalid in the second blob
for j := range numberOfColumns {
for j := uint64(0); j < numberOfColumns; j++ {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))

View File

@@ -22,7 +22,10 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,11 +33,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
if err != nil {
return nil
}
@@ -50,11 +53,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
return st
}
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// Otherwise we need to advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {

View File

@@ -181,123 +181,6 @@ func TestService_GetRecentPreState(t *testing.T) {
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 30 <-- 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
@@ -326,14 +209,16 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
var wg sync.WaitGroup
errChan := make(chan error, 1000)
for range 1000 {
wg.Go(func() {
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
_, err := service.getAttPreState(ctx, cp1)
if err != nil {
errChan <- err
}
})
}()
}
go func() {

View File

@@ -134,7 +134,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -306,7 +306,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()
@@ -634,7 +634,9 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
return nil, nil
}
if len(expected) > fieldparams.NumberOfColumns {
numberOfColumns := params.BeaconConfig().NumberOfColumns
if uint64(len(expected)) > numberOfColumns {
return nil, errMaxDataColumnsExceeded
}
@@ -815,10 +817,11 @@ func (s *Service) areDataColumnsAvailable(
}
case <-ctx.Done():
var missingIndices any = "all"
missingIndicesCount := len(missing)
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missing))
if missingIndicesCount < fieldparams.NumberOfColumns {
if missingIndicesCount < numberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}
@@ -945,6 +948,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
return
}

View File

@@ -147,7 +147,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
bState := st.Copy()
var blks []consensusblocks.ROBlock
for i := range 97 {
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
@@ -1323,7 +1323,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
require.NoError(t, err)
logHook := logTest.NewGlobal()
for range 10 {
for i := 0; i < 10; i++ {
fc := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
require.NoError(t, err)
@@ -1949,7 +1949,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.Equal(t, true, optimistic)
// Check that the invalid blocks are not in database
for i := range 19 - 13 {
for i := 0; i < 19-13; i++ {
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, invalidRoots[i]))
}
@@ -2495,8 +2495,7 @@ func TestMissingBlobIndices(t *testing.T) {
}
func TestMissingDataColumnIndices(t *testing.T) {
const countPlusOne = fieldparams.NumberOfColumns + 1
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
tooManyColumns := make(map[uint64]bool, countPlusOne)
for i := range countPlusOne {
tooManyColumns[uint64(i)] = true
@@ -2806,10 +2805,6 @@ func TestProcessLightClientUpdate(t *testing.T) {
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
for _, testVersion := range version.All()[1:] {
if testVersion == version.Gloas {
// TODO(16027): Unskip light client tests for Gloas
continue
}
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
@@ -2884,7 +2879,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
// set a better sync aggregate
scb := make([]byte, 64)
for i := range 5 {
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{

View File

@@ -39,8 +39,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -69,7 +69,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block is blacklisted
@@ -242,7 +242,7 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {
@@ -332,7 +332,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()

View File

@@ -216,11 +216,13 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
wg.Go(func() {
wg.Add(1)
go func() {
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
})
wg.Done()
}()
wg.Wait()
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
@@ -471,35 +470,30 @@ func (s *Service) removeStartupState() {
// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
if err != nil {
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
log.WithError(err).Error("Could not update subscription status to all data subnets")
}
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement
// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
}
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = cfg.NumberOfCustodyGroups
}
// Safely compute the fulu fork slot.
@@ -516,23 +510,12 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
}
}
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}
if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
}
return earliestAvailableSlot, actualCustodyGroupCount, nil
return earliestAvailableSlot, custodyGroupCount, nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
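For orientation, a compressed sketch of the flag-driven custody selection that the removed branch above implements. The branching mirrors the hunk (supernode, semi-supernode, default requirement); the helper below and its plain `uint64` inputs are simplified stand-ins, not the repository's real types, and the numbers in `main` are illustrative only:

```go
package main

import "fmt"

// targetCustodyGroups mirrors the branching in the removed code above: a
// supernode custodies every group, a semi-supernode custodies at least the
// minimum needed to reconstruct columns, and everyone else custodies the
// protocol requirement.
func targetCustodyGroups(supernode, semiSupernode bool, requirement, allGroups, minToReconstruct uint64) uint64 {
	switch {
	case supernode:
		return allGroups
	case semiSupernode:
		return max(requirement, minToReconstruct) // Go 1.21+ builtin
	default:
		return requirement
	}
}

func main() {
	// Illustrative numbers: 128 groups, requirement 4, reconstruction
	// threshold assumed to be half the columns.
	fmt.Println(targetCustodyGroups(false, true, 4, 128, 64)) // 64
}
```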

View File

@@ -412,7 +412,8 @@ func BenchmarkHasBlockDB(b *testing.B) {
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
}
}
@@ -431,7 +432,8 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}
@@ -603,6 +605,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
numberOfColumns = uint64(128)
)
params.SetupTestConfigCleanup(t)
@@ -610,6 +613,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
cfg.NumberOfColumns = numberOfColumns
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
@@ -640,7 +644,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
@@ -678,7 +682,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
@@ -693,121 +697,4 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("Supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
})
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
})
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Mock a high custody requirement (simulating many validators)
// We need to override the custody requirement calculation
// For this test, we'll verify the logic by checking if custodyRequirement > 64
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
// This would require a different test setup with actual validators
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
// With low validator requirements (4), should use semi-supernode minimum (64)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}

View File

@@ -106,7 +106,7 @@ type EventFeedWrapper struct {
subscribed chan struct{} // this channel is closed once a subscription is made
}
func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
select {
case <-w.subscribed:
break // already closed
@@ -116,7 +116,7 @@ func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
return w.feed.Subscribe(channel)
}
func (w *EventFeedWrapper) Send(value any) int {
func (w *EventFeedWrapper) Send(value interface{}) int {
return w.feed.Send(value)
}
@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
if s.State == nil {
return ErrNilState
}
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}

View File

@@ -166,7 +166,7 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
for i := range reg {
for i := 0; i < len(reg); i++ {
r := reg[i]
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
if !exists {

View File

@@ -17,7 +17,7 @@ import (
func TestBalanceCache_AddGetBalance(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b
@@ -61,7 +61,7 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
func TestBalanceCache_BalanceKey(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b

View File

@@ -51,7 +51,7 @@ type CommitteeCache struct {
}
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
func committeeKeyFn(obj any) (string, error) {
func committeeKeyFn(obj interface{}) (string, error) {
info, ok := obj.(*Committees)
if !ok {
return "", ErrNotCommittee

View File

@@ -14,7 +14,7 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
k, err := committeeKeyFn(c)
require.NoError(t, err)
@@ -27,7 +27,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
@@ -42,7 +42,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
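Many hunks in this compare, including the one above, rewrite Go 1.22 range-over-int loops back into three-clause form. The two spellings iterate the same values; a minimal sketch:

```go
package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an integer yields 0..n-1.
	for i := range 3 {
		fmt.Println("range form:", i)
	}

	// Equivalent three-clause loop that works on any Go version.
	for i := 0; i < 3; i++ {
		fmt.Println("classic form:", i)
	}
}
```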

View File

@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}
// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(_ any, _ bool) error {
func popProcessNoopFunc(_ interface{}, _ bool) error {
return nil
}

View File

@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
}
var ctrs []*ethpb.DepositContainer
for i := range 2000 {
for i := 0; i < 2000; i++ {
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
}
@@ -948,9 +948,9 @@ func rootCreator(rn byte) []byte {
func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
totalDeposits := 10000
input := bytesutil.ToBytes32([]byte("foo"))
for b.Loop() {
for i := 0; i < b.N; i++ {
dt := NewDepositTree()
for range totalDeposits {
for j := 0; j < totalDeposits; j++ {
err := dt.Insert(input[:], 0)
require.NoError(b, err)
}
@@ -959,10 +959,10 @@ func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
func BenchmarkDepositTree_InsertOldImplementation(b *testing.B) {
totalDeposits := 10000
input := bytesutil.ToBytes32([]byte("foo"))
for b.Loop() {
for i := 0; i < b.N; i++ {
dt, err := trie.NewTrie(33)
require.NoError(b, err)
for range totalDeposits {
for j := 0; j < totalDeposits; j++ {
err := dt.Insert(input[:], 0)
require.NoError(b, err)
}
@@ -980,8 +980,8 @@ func BenchmarkDepositTree_HashTreeRootNewImplementation(b *testing.B) {
}
b.ReportAllocs()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = tr.HashTreeRoot()
require.NoError(b, err)
}
@@ -999,8 +999,8 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
}
b.ReportAllocs()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = dt.HashTreeRoot()
require.NoError(b, err)
}
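The benchmark hunks above trade Go 1.24's `testing.B.Loop` for the classic `b.N` counter. A hedged sketch of the two shapes (the benchmark names and SHA-256 payload below are illustrative, not from the repository): `b.Loop` starts timing at its first call, so setup above the loop is excluded automatically, whereas the `b.N` form needs an explicit `b.ResetTimer` to get the same effect.

```go
package bench

import (
	"crypto/sha256"
	"testing"
)

// fixture is a stand-in for expensive per-benchmark setup.
func fixture() []byte { return make([]byte, 1<<20) }

// BenchmarkSumNew uses Go 1.24's b.Loop, which keeps the fixture setup
// out of the measurement without an explicit timer reset.
func BenchmarkSumNew(b *testing.B) {
	data := fixture()
	for b.Loop() {
		sha256.Sum256(data)
	}
}

// BenchmarkSumOld is the pre-1.24 equivalent: ResetTimer excludes the
// setup above from the timing before the b.N loop runs.
func BenchmarkSumOld(b *testing.B) {
	data := fixture()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sha256.Sum256(data)
	}
}
```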

View File

@@ -20,7 +20,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
size := ds.depositCount
index := len(ds.finalized)
root := trie.ZeroHashes[0]
for i := range DepositContractDepth {
for i := 0; i < DepositContractDepth; i++ {
if (size & 1) == 1 {
if index == 0 {
break

View File

@@ -47,13 +47,15 @@ func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
c.Enable()
wg := new(sync.WaitGroup)
wg.Go(func() {
wg.Add(1)
go func() {
// Get call will only terminate when
// it is not longer in progress.
// it is no longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
})
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()

View File

@@ -236,7 +236,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
// The key is the `currentSyncCommitteeRoot` within the field.
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
func keyFn(obj any) (string, error) {
func keyFn(obj interface{}) (string, error) {
info, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return "", errNotSyncCommitteeIndexPosition

View File

@@ -12,12 +12,12 @@ import (
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
c := newSyncSubnetIDs()
for i := range 20 {
for i := 0; i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
for i := range uint64(20) {
for i := uint64(0); i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
@@ -34,7 +34,7 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
c := newSyncSubnetIDs()
for i := range 20 {
for i := 0; i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
@@ -42,7 +42,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
coms := c.GetAllSubnets(50)
assert.Equal(t, 0, len(coms))
for i := range uint64(20) {
for i := uint64(0); i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)

View File

@@ -461,7 +461,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
st := &ethpb.BeaconStateAltair{}
b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(st)
fuzzer.Fuzz(b)
if b.Block == nil {

View File

@@ -240,7 +240,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
require.NoError(t, err)
for i := range syncBits {
for i := 0; i < len(syncBits); i++ {
if syncBits.BitAt(uint64(i)) {
pk := bytesutil.ToBytes48(committeeKeys[i])
require.DeepEqual(t, true, votedMap[pk])

View File

@@ -195,7 +195,10 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := min(params.BeaconConfig().MaxEffectiveBalance, amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement))
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
}
return &ethpb.Validator{
PublicKey: pubKey,

View File

@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
state := &ethpb.BeaconStateAltair{}
deposits := make([]*ethpb.Deposit, 100)
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
for i := range deposits {
fuzzer.Fuzz(deposits[i])
@@ -37,7 +37,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
deposit := &ethpb.Deposit{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
@@ -56,7 +56,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
deposit := &ethpb.Deposit{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -74,7 +74,7 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -92,7 +92,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafeAltair(state)

View File

@@ -122,8 +122,11 @@ func ProcessInactivityScores(
}
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
score := recoveryRate
// Prevents underflow below 0.
score := min(recoveryRate, v.InactivityScore)
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
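The hunk above replaces a `min`-based clamp with an explicit comparison; both guard the unsigned subtraction so the recovery rate can never push the inactivity score below zero. A small sketch of the equivalence, with made-up values in `main`:

```go
package main

import "fmt"

// recoverInactivity applies the recovery rate without letting the unsigned
// score wrap below zero, matching both spellings in the hunk above.
func recoverInactivity(score, recoveryRate uint64) uint64 {
	dec := min(recoveryRate, score) // Go 1.21+ builtin
	return score - dec
}

func main() {
	fmt.Println(recoverInactivity(1, 16))   // 0, not a wrapped-around value
	fmt.Println(recoverInactivity(100, 16)) // 84
}
```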
@@ -239,7 +242,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
}
balances := beaconState.Balances()
for i := range numOfVals {
for i := 0; i < numOfVals; i++ {
vals[i].BeforeEpochTransitionBalance = balances[i]
// Compute the post balance of the validator after accounting for the

View File

@@ -21,7 +21,7 @@ import (
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
@@ -113,7 +113,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
helpers.ClearCache()
getState := func(t *testing.T, count uint64) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
@@ -147,7 +147,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
func TestSyncCommittee_CanGet(t *testing.T) {
getState := func(t *testing.T, count uint64) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
validators[i] = &ethpb.Validator{
@@ -394,7 +394,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
func getState(t *testing.T, count uint64) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
validators[i] = &ethpb.Validator{

View File

@@ -33,7 +33,7 @@ func TestTranslateParticipation(t *testing.T) {
r, err := helpers.BlockRootAtSlot(s, 0)
require.NoError(t, err)
var pendingAtts []*ethpb.PendingAttestation
for i := range 3 {
for i := 0; i < 3; i++ {
pendingAtts = append(pendingAtts, &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
CommitteeIndex: primitives.CommitteeIndex(i),

View File

@@ -257,7 +257,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
}
indices := indexedAtt.GetAttestingIndices()
var pubkeys []bls.PublicKey
for i := range indices {
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
if err != nil {

View File

@@ -317,7 +317,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -373,7 +373,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
@@ -481,7 +481,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
list := bitfield.Bitlist{0b11111}
var atts []ethpb.Att
for range uint64(1000) {
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 1,
@@ -498,7 +498,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
atts = []ethpb.Att{}
list = bitfield.Bitlist{0b10000}
for range uint64(1000) {
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 1,
@@ -524,7 +524,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
@@ -588,7 +588,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
@@ -707,7 +707,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),

View File

@@ -21,7 +21,7 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
att := &ethpb.Attestation{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(att)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -37,7 +37,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
state := &ethpb.BeaconState{}
block := &ethpb.SignedBeaconBlock{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
@@ -63,7 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
var p []byte
var s []byte
var d []byte
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(&ba)
fuzzer.Fuzz(&pubkey)
fuzzer.Fuzz(&sig)
@@ -83,7 +83,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
e := &ethpb.Eth1Data{}
state, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
require.NoError(t, err)
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)
s, err := ProcessEth1DataInBlock(t.Context(), state, e)
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
eth1data := &ethpb.Eth1Data{}
eth1data2 := &ethpb.Eth1Data{}
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(eth1data2)
AreEth1DataEqual(eth1data, eth1data2)
@@ -110,7 +110,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
eth1data := &ethpb.Eth1Data{}
var stateVotes []*ethpb.Eth1Data
for i := range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(&stateVotes)
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -129,7 +129,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
block := &ethpb.BeaconBlock{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -145,7 +145,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
state := &ethpb.BeaconState{}
b := &ethpb.SignedBeaconBlock{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -168,7 +168,7 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
blockBody := &ethpb.BeaconBlockBody{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -186,7 +186,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
state := &ethpb.BeaconState{}
p := &ethpb.ProposerSlashing{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(p)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -203,7 +203,7 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
proposerSlashing := &ethpb.ProposerSlashing{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(proposerSlashing)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -219,7 +219,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
state := &ethpb.BeaconState{}
a := &ethpb.AttesterSlashing{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -237,7 +237,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
state := &ethpb.BeaconState{}
attesterSlashing := &ethpb.AttesterSlashing{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attesterSlashing)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -253,7 +253,7 @@ func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
attestationData := &ethpb.AttestationData{}
attestationData2 := &ethpb.AttestationData{}
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attestationData)
fuzzer.Fuzz(attestationData2)
IsSlashableAttestationData(attestationData, attestationData2)
@@ -264,7 +264,7 @@ func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
attesterSlashing := &ethpb.AttesterSlashing{}
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attesterSlashing)
SlashableAttesterIndices(attesterSlashing)
}
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
b := &ethpb.SignedBeaconBlock{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -298,7 +298,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
state := &ethpb.BeaconState{}
idxAttestation := &ethpb.IndexedAttestation{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(idxAttestation)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -313,7 +313,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -329,7 +329,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
state := &ethpb.BeaconState{}
e := &ethpb.SignedVoluntaryExit{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -346,7 +346,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
e := &ethpb.SignedVoluntaryExit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -366,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
fork := &ethpb.Fork{}
var slot primitives.Slot
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(ve)
fuzzer.Fuzz(rawVal)
fuzzer.Fuzz(fork)

View File

@@ -19,7 +19,7 @@ import (
func FakeDeposits(n uint64) []*ethpb.Eth1Data {
deposits := make([]*ethpb.Eth1Data, n)
for i := range n {
for i := uint64(0); i < n; i++ {
deposits[i] = &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
@@ -175,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
}
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
for range period {
for i := uint64(0); i < period; i++ {
processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data)
require.NoError(t, err)
require.Equal(t, true, processedState.Version() == version.Phase0)

View File

@@ -27,7 +27,7 @@ func init() {
func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
@@ -104,7 +104,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
@@ -148,7 +148,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
@@ -189,7 +189,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
@@ -233,7 +233,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
func TestProcessBlockHeader_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
@@ -293,7 +293,7 @@ func TestProcessBlockHeader_OK(t *testing.T) {
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),

View File

@@ -90,9 +90,6 @@ func IsExecutionEnabled(st state.ReadOnlyBeaconState, body interfaces.ReadOnlyBe
if st == nil || body == nil {
return false, errors.New("nil state or block body")
}
if st.Version() >= version.Capella {
return true, nil
}
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}

View File

@@ -260,12 +260,11 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
func Test_IsExecutionEnabled(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header interfaces.ExecutionData
useAltairSt bool
useCapellaSt bool
want bool
name string
payload *enginev1.ExecutionPayload
header interfaces.ExecutionData
useAltairSt bool
want bool
}{
{
name: "use older than bellatrix state",
@@ -332,17 +331,6 @@ func Test_IsExecutionEnabled(t *testing.T) {
}(),
want: true,
},
{
name: "capella state always enabled",
payload: emptyPayload(),
header: func() interfaces.ExecutionData {
h, err := emptyPayloadHeader()
require.NoError(t, err)
return h
}(),
useCapellaSt: true,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -354,8 +342,6 @@ func Test_IsExecutionEnabled(t *testing.T) {
require.NoError(t, err)
if tt.useAltairSt {
st, _ = util.DeterministicGenesisStateAltair(t, 1)
} else if tt.useCapellaSt {
st, _ = util.DeterministicGenesisStateCapella(t, 1)
}
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
@@ -865,7 +851,8 @@ func BenchmarkBellatrixComplete(b *testing.B) {
require.NoError(b, err)
require.NoError(b, st.SetLatestExecutionPayloadHeader(h))
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := blocks.IsMergeTransitionComplete(st)
require.NoError(b, err)
}

View File

@@ -28,7 +28,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
ActivationEpoch: primitives.Epoch(0),
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: fmt.Appendf(nil, "val_%d", i),
PublicKey: []byte(fmt.Sprintf("val_%d", i)),
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawalCredentials: wd,
}
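The hunk above swaps `fmt.Appendf` (Go 1.19) for `[]byte(fmt.Sprintf(...))`. Both produce the same bytes; `Appendf` formats straight into a byte slice and skips the intermediate string. A minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	i := 7
	a := fmt.Appendf(nil, "val_%d", i)    // formats directly into a []byte
	b := []byte(fmt.Sprintf("val_%d", i)) // formats to a string, then copies
	fmt.Println(bytes.Equal(a, b))        // true
}
```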

View File

@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
state := &ethpb.BeaconStateElectra{}
deposits := make([]*ethpb.Deposit, 100)
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
for i := range deposits {
fuzzer.Fuzz(deposits[i])
@@ -36,7 +36,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
state := &ethpb.BeaconStateElectra{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafeElectra(state)

View File

@@ -95,7 +95,7 @@ func TestProcessPendingDeposits(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Gwei(100), res)
// Validators 0..9 should have their balance increased
for i := range primitives.ValidatorIndex(10) {
for i := primitives.ValidatorIndex(0); i < 10; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
@@ -122,7 +122,7 @@ func TestProcessPendingDeposits(t *testing.T) {
check: func(t *testing.T, st state.BeaconState) {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
// Validators 0..9 should have their balance increased
for i := range primitives.ValidatorIndex(2) {
for i := primitives.ValidatorIndex(0); i < 2; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
@@ -149,7 +149,7 @@ func TestProcessPendingDeposits(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
// Validators 0..4 should have their balance increased
for i := range primitives.ValidatorIndex(4) {
for i := primitives.ValidatorIndex(0); i < 4; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
@@ -528,7 +528,7 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
vals := make([]*eth.Validator, numVals)
bals := make([]uint64, numVals)
for i := range numVals {
for i := uint64(0); i < numVals; i++ {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)

View File

@@ -56,7 +56,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
}
for range uint64(10) {
for i := uint64(0); i < 10; i++ {
base.Validators = append(base.Validators, &eth.Validator{
ActivationEligibilityEpoch: finalizedEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -82,7 +82,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
}
for range uint64(10) {
for i := uint64(0); i < 10; i++ {
base.Validators = append(base.Validators, &eth.Validator{
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
@@ -108,7 +108,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
}
for range uint64(10) {
for i := uint64(0); i < 10; i++ {
base.Validators = append(base.Validators, &eth.Validator{
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
ExitEpoch: 10,
@@ -157,7 +157,7 @@ func Benchmark_ProcessRegistryUpdates_MassEjection(b *testing.B) {
st, err := util.NewBeaconStateElectra()
require.NoError(b, err)
for b.Loop() {
for i := 0; i < b.N; i++ {
b.StopTimer()
if err := st.SetValidators(genValidators(100000)); err != nil {
panic(err)

View File

@@ -329,7 +329,10 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, er
balance := bals[idx]
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
effectiveBal := maxEffBalance
if effectiveBal > balance-balance%effBalanceInc {
effectiveBal = balance - balance%effBalanceInc
}
if effectiveBal != val.EffectiveBalance() {
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
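The hunk above expands a `min` call into an explicit comparison when recomputing the effective balance: the actual balance is rounded down to a whole increment and capped at the maximum. A worked sketch with assumed mainnet-style parameters (1 ETH increment, 32 ETH cap), which are not taken from the diff itself:

```go
package main

import "fmt"

// effectiveBalance rounds the balance down to a whole increment and caps it
// at the maximum effective balance, matching both spellings in the hunk.
func effectiveBalance(balance, increment, maxEffective uint64) uint64 {
	return min(maxEffective, balance-balance%increment) // Go 1.21+ builtin
}

func main() {
	const gwei = uint64(1_000_000_000)
	// Assumed parameters: 1 ETH increment, 32 ETH cap.
	fmt.Println(effectiveBalance(31_400_000_000, gwei, 32*gwei)) // 31000000000
	fmt.Println(effectiveBalance(33_200_000_000, gwei, 32*gwei)) // 32000000000
}
```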

View File

@@ -14,7 +14,7 @@ func TestFuzzFinalUpdates_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
base := &ethpb.BeaconState{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(base)
s, err := state_native.InitializeFromProtoUnsafePhase0(base)
require.NoError(t, err)

View File

@@ -218,7 +218,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
cfg.ChurnLimitQuotient = 1
params.OverrideBeaconConfig(cfg)
for range uint64(10) {
for i := uint64(0); i < 10; i++ {
base.Validators = append(base.Validators, &ethpb.Validator{
ActivationEligibilityEpoch: finalizedEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
@@ -314,28 +314,28 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
func buildState(t testing.TB, slot primitives.Slot, validatorCount uint64) state.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
}
validatorBalances := make([]uint64, len(validators))
for i := range validatorBalances {
for i := 0; i < len(validatorBalances); i++ {
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
}
latestActiveIndexRoots := make(
[][]byte,
params.BeaconConfig().EpochsPerHistoricalVector,
)
for i := range latestActiveIndexRoots {
for i := 0; i < len(latestActiveIndexRoots); i++ {
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
}
latestRandaoMixes := make(
[][]byte,
params.BeaconConfig().EpochsPerHistoricalVector,
)
for i := range latestRandaoMixes {
for i := 0; i < len(latestRandaoMixes); i++ {
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
}
s, err := util.NewBeaconState()

View File

@@ -19,7 +19,7 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
e := params.BeaconConfig().FarFutureEpoch
a := params.BeaconConfig().MaxEffectiveBalance
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
blockRoots[i] = []byte{byte(i)}
}
base := &ethpb.BeaconState{
@@ -56,7 +56,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
e := params.BeaconConfig().FarFutureEpoch
a := params.BeaconConfig().MaxEffectiveBalance
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
blockRoots[i] = []byte{byte(i)}
}
base := &ethpb.BeaconState{
@@ -93,7 +93,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
e := params.BeaconConfig().FarFutureEpoch
a := params.BeaconConfig().MaxEffectiveBalance
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
blockRoots[i] = []byte{byte(i)}
}
base := &ethpb.BeaconState{
@@ -128,7 +128,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
func TestUnrealizedCheckpoints(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, len(validators))
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,

View File

@@ -42,7 +42,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
return nil, errors.Wrap(err, "could not get proposer attestation delta")
}
validatorBals := state.Balances()
for i := range numOfVals {
for i := 0; i < numOfVals; i++ {
vp[i].BeforeEpochTransitionBalance = validatorBals[i]
// Compute the post balance of the validator after accounting for the

View File

@@ -24,7 +24,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
validatorCount := uint64(2048)
base := buildState(e+3, validatorCount)
atts := make([]*ethpb.PendingAttestation, 3)
for i := range atts {
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -63,7 +63,7 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
base := buildState(e+2, validatorCount)
atts := make([]*ethpb.PendingAttestation, 3)
var emptyRoot [32]byte
for i := range atts {
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
@@ -99,7 +99,7 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) {
base := buildState(e+2, validatorCount)
atts := make([]*ethpb.PendingAttestation, 3)
var emptyRoot [32]byte
for i := range atts {
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
@@ -131,7 +131,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
validatorCount := uint64(2048)
base := buildState(e+3, validatorCount)
atts := make([]*ethpb.PendingAttestation, 3)
for i := range atts {
for i := 0; i < len(atts); i++ {
atts[i] = &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -176,28 +176,28 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
}
validatorBalances := make([]uint64, len(validators))
for i := range validatorBalances {
for i := 0; i < len(validatorBalances); i++ {
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
}
latestActiveIndexRoots := make(
[][]byte,
params.BeaconConfig().EpochsPerHistoricalVector,
)
for i := range latestActiveIndexRoots {
for i := 0; i < len(latestActiveIndexRoots); i++ {
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
}
latestRandaoMixes := make(
[][]byte,
params.BeaconConfig().EpochsPerHistoricalVector,
)
for i := range latestRandaoMixes {
for i := 0; i < len(latestRandaoMixes); i++ {
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
}
return &ethpb.BeaconState{

View File

@@ -17,5 +17,5 @@ type Event struct {
// Type is the type of event.
Type EventType
// Data is event-specific data.
Data any
Data interface{}
}
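A large share of this compare replaces `any` with `interface{}`, as in the struct above. Since Go 1.18, `any` is a predeclared alias for `interface{}`, so the two spellings denote the identical type; a minimal sketch:

```go
package main

import "fmt"

// Both fields have the same (empty interface) type; only the spelling differs.
type event struct {
	DataAny   any
	DataIface interface{}
}

func main() {
	e := event{DataAny: 42, DataIface: "hello"}
	// Values of the two fields are freely assignable to one another.
	e.DataIface = e.DataAny
	fmt.Println(e.DataAny, e.DataIface)
}
```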

View File

@@ -54,7 +54,7 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
validators := make([]*ethpb.Validator, validatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{

View File

@@ -5,7 +5,7 @@ package helpers
import (
"context"
"fmt"
"slices"
"sort"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
@@ -515,7 +515,9 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
// used for failing verify signature fallback.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
slices.Sort(sortedIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
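The import and sort changes above trade `slices.Sort` for `sort.Slice` with a hand-written less function. For an ordered element type the two are equivalent; `slices.Sort` (Go 1.21) simply infers the comparison. A minimal sketch:

```go
package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []uint64{5, 1, 3}
	b := []uint64{5, 1, 3}

	slices.Sort(a) // Go 1.21+, works for any ordered element type

	sort.Slice(b, func(i, j int) bool { return b[i] < b[j] }) // pre-1.21 form

	fmt.Println(a, b) // [1 3 5] [1 3 5]
}
```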

View File

@@ -29,7 +29,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
validators := make([]*ethpb.Validator, validatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -122,7 +122,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := range validators {
for i := 0; i < len(validators); i++ {
var activationEpoch primitives.Epoch
if i >= len(validators)/2 {
activationEpoch = 3
@@ -151,7 +151,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
for i := range validators {
for i := 0; i < len(validators); i++ {
// First 2 epochs only half validators are activated.
var activationEpoch primitives.Epoch
if i >= len(validators)/2 {
@@ -234,7 +234,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := range validators {
for i := 0; i < len(validators); i++ {
// First 2 epochs only half validators are activated.
var activationEpoch primitives.Epoch
if i >= len(validators)/2 {
@@ -266,7 +266,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -287,7 +287,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
@@ -323,7 +323,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
activeRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -489,7 +489,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 300000)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -512,7 +512,8 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
panic(err)
}
for b.Loop() {
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
@@ -522,7 +523,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 3000000)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -545,7 +546,8 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
panic(err)
}
for b.Loop() {
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
@@ -555,7 +557,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 128000)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -574,8 +576,8 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
i := uint64(0)
index := uint64(0)
for b.Loop() {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
@@ -590,7 +592,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
validators := make([]*ethpb.Validator, 1000000)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -609,8 +611,8 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
i := uint64(0)
index := uint64(0)
for b.Loop() {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
@@ -625,7 +627,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
validators := make([]*ethpb.Validator, 4000000)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -644,8 +646,8 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
i := uint64(0)
index := uint64(0)
for b.Loop() {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
@@ -661,7 +663,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
committeeSize := uint64(16)
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(committeeSize))
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -686,7 +688,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
func TestPrecomputeProposerIndices_Ok(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -730,7 +732,7 @@ func TestAttestationCommitteesFromState(t *testing.T) {
ctx := t.Context()
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -766,7 +768,7 @@ func TestAttestationCommitteesFromCache(t *testing.T) {
ctx := t.Context()
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -932,7 +934,7 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, state, epoch)
require.NoError(t, err)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
for epochOffset := range primitives.Epoch(2) {
for epochOffset := primitives.Epoch(0); epochOffset < 2; epochOffset++ {
targetEpoch := epoch + epochOffset
activeIndices, err := helpers.ActiveValidatorIndices(ctx, state, targetEpoch)

View File

@@ -16,7 +16,7 @@ import (
func TestRandaoMix_OK(t *testing.T) {
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range randaoMixes {
for i := 0; i < len(randaoMixes); i++ {
intInBytes := make([]byte, 32)
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
randaoMixes[i] = intInBytes
@@ -52,7 +52,7 @@ func TestRandaoMix_OK(t *testing.T) {
func TestRandaoMix_CopyOK(t *testing.T) {
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range randaoMixes {
for i := 0; i < len(randaoMixes); i++ {
intInBytes := make([]byte, 32)
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
randaoMixes[i] = intInBytes
@@ -96,7 +96,7 @@ func TestGenerateSeed_OK(t *testing.T) {
helpers.ClearCache()
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range randaoMixes {
for i := 0; i < len(randaoMixes); i++ {
intInBytes := make([]byte, 32)
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
randaoMixes[i] = intInBytes

View File

@@ -239,28 +239,28 @@ func TestIsInInactivityLeak(t *testing.T) {
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
}
validatorBalances := make([]uint64, len(validators))
for i := range validatorBalances {
for i := 0; i < len(validatorBalances); i++ {
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
}
latestActiveIndexRoots := make(
[][]byte,
params.BeaconConfig().EpochsPerHistoricalVector,
)
for i := range latestActiveIndexRoots {
for i := 0; i < len(latestActiveIndexRoots); i++ {
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
}
latestRandaoMixes := make(
[][]byte,
params.BeaconConfig().EpochsPerHistoricalVector,
)
for i := range latestRandaoMixes {
for i := 0; i < len(latestRandaoMixes); i++ {
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
}
return &ethpb.BeaconState{

View File

@@ -23,7 +23,7 @@ var maxShuffleListSize uint64 = 1 << 40
func SplitIndices(l []uint64, n uint64) [][]uint64 {
var divided [][]uint64
var lSize = uint64(len(l))
for i := range n {
for i := uint64(0); i < n; i++ {
start := slice.SplitOffset(lSize, n, i)
end := slice.SplitOffset(lSize, n, i+1)
divided = append(divided, l[start:end])
@@ -103,7 +103,10 @@ func ComputeShuffledIndex(index primitives.ValidatorIndex, indexCount uint64, se
pivot := hash8Int % indexCount
flip := (pivot + indexCount - uint64(index)) % indexCount
// Consider every pair only once by picking the highest pair index to retrieve randomness.
position := max(flip, uint64(index))
position := uint64(index)
if flip > position {
position = flip
}
// Add position except its last byte to []buf for randomness,
// it will be used later to select a bit from the resulting hash.
binary.LittleEndian.PutUint64(posBuffer[:8], position>>8)
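These hunks appear to swap Go 1.21/1.22 idioms (the built-in max, range-over-int loops) back to their classic forms; the two spellings are behaviorally identical. A minimal standalone sketch (assuming Go 1.22+, not taken from the repository) illustrating the equivalence:

package main

import "fmt"

func main() {
	n := 5

	// Go 1.22+ range-over-int form used on one side of these hunks.
	sumA := 0
	for i := range n {
		sumA += i
	}

	// Classic equivalent used on the other side.
	sumB := 0
	for i := 0; i < n; i++ {
		sumB += i
	}

	// Built-in max (Go 1.21+) versus the explicit comparison in the hunk above.
	flip, index := uint64(7), uint64(3)
	positionA := max(flip, index)
	positionB := index
	if flip > positionB {
		positionB = flip
	}

	fmt.Println(sumA == sumB, positionA == positionB) // true true
}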

View File

@@ -30,7 +30,7 @@ func TestShuffleList_OK(t *testing.T) {
var list1 []primitives.ValidatorIndex
seed1 := [32]byte{1, 128, 12}
seed2 := [32]byte{2, 128, 12}
for i := range 10 {
for i := 0; i < 10; i++ {
list1 = append(list1, primitives.ValidatorIndex(i))
}
@@ -55,7 +55,7 @@ func TestSplitIndices_OK(t *testing.T) {
var l []uint64
numValidators := uint64(64000)
for i := range numValidators {
for i := uint64(0); i < numValidators; i++ {
l = append(l, i)
}
split := SplitIndices(l, uint64(params.BeaconConfig().SlotsPerEpoch))
@@ -104,7 +104,7 @@ func BenchmarkIndexComparison(b *testing.B) {
seed := [32]byte{123, 42}
for _, listSize := range listSizes {
b.Run(fmt.Sprintf("Indexwise_ShuffleList_%d", listSize), func(ib *testing.B) {
for ib.Loop() {
for i := 0; i < ib.N; i++ {
// Simulate a list-shuffle by running shuffle-index listSize times.
for j := primitives.ValidatorIndex(0); uint64(j) < listSize; j++ {
_, err := ShuffledIndex(j, listSize, seed)
@@ -120,11 +120,11 @@ func BenchmarkShuffleList(b *testing.B) {
seed := [32]byte{123, 42}
for _, listSize := range listSizes {
testIndices := make([]primitives.ValidatorIndex, listSize)
for i := range listSize {
for i := uint64(0); i < listSize; i++ {
testIndices[i] = primitives.ValidatorIndex(i)
}
b.Run(fmt.Sprintf("ShuffleList_%d", listSize), func(ib *testing.B) {
for ib.Loop() {
for i := 0; i < ib.N; i++ {
_, err := ShuffleList(testIndices, seed)
assert.NoError(b, err)
}
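The same theme shows up in the benchmarks: for ib.Loop() (testing.B.Loop, added in Go 1.24) and the classic for i := 0; i < ib.N; i++ loop measure the same body. A standalone sketch (names are illustrative, not from the repository) contrasting the two styles:

package shuffle_test

import "testing"

func work() int {
	s := 0
	for i := 0; i < 1000; i++ {
		s += i
	}
	return s
}

func BenchmarkWorkLoop(b *testing.B) {
	// Go 1.24+ style: the framework decides when to stop iterating.
	for b.Loop() {
		_ = work()
	}
}

func BenchmarkWorkN(b *testing.B) {
	// Classic style reinstated by these hunks.
	for i := 0; i < b.N; i++ {
		_ = work()
	}
}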
@@ -161,12 +161,12 @@ func TestSplitIndicesAndOffset_OK(t *testing.T) {
var l []uint64
validators := uint64(64000)
for i := range validators {
for i := uint64(0); i < validators; i++ {
l = append(l, i)
}
chunks := uint64(6)
split := SplitIndices(l, chunks)
for i := range chunks {
for i := uint64(0); i < chunks; i++ {
if !reflect.DeepEqual(split[i], l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)]) {
t.Errorf("Want: %v got: %v", l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)], split[i])
break

View File

@@ -24,7 +24,7 @@ func TestCurrentPeriodPositions(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -56,7 +56,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -87,7 +87,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -116,7 +116,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -144,7 +144,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -175,7 +175,7 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -203,7 +203,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -231,7 +231,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -262,7 +262,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -304,7 +304,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -332,7 +332,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -363,7 +363,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -391,7 +391,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
@@ -449,7 +449,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := range validators {
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{

View File

@@ -184,7 +184,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -241,7 +241,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -270,7 +270,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -322,7 +322,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
c := 1000
validators := make([]*ethpb.Validator, c)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -357,7 +357,7 @@ func TestChurnLimit_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, test.validatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -861,7 +861,7 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
validators := make([]*ethpb.Validator, 4)
balances := make([]uint64, len(validators))
for i := range uint64(4) {
for i := uint64(0); i < 4; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),

View File

@@ -270,7 +270,7 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
validators := make([]*ethpb.Validator, valCount)
balances := make([]uint64, len(validators))
for i := range valCount {
for i := uint64(0); i < valCount; i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
WithdrawalCredentials: make([]byte, 32),

View File

@@ -43,15 +43,13 @@ go_test(
"das_core_test.go",
"info_test.go",
"p2p_interface_test.go",
"reconstruction_helpers_test.go",
"reconstruction_test.go",
"semi_supernode_test.go",
"utils_test.go",
"validator_test.go",
"verification_test.go",
],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
"math"
"slices"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -97,7 +96,8 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
return nil, ErrCustodyGroupTooLarge
}
numberOfColumns := uint64(fieldparams.NumberOfColumns)
numberOfColumns := cfg.NumberOfColumns
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
columns := make([]uint64, 0, columnsPerGroup)
@@ -112,9 +112,8 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
const numberOfColumns = fieldparams.NumberOfColumns
cfg := params.BeaconConfig()
numberOfColumns := cfg.NumberOfColumns
numberOfCustodyGroups := cfg.NumberOfCustodyGroups
if columnIndex >= numberOfColumns {

View File

@@ -30,6 +30,7 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
func TestComputeCustodyGroupForColumn(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.NumberOfColumns = 128
config.NumberOfCustodyGroups = 64
params.OverrideBeaconConfig(config)

View File

@@ -2,7 +2,6 @@ package peerdas
import (
"encoding/binary"
"maps"
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -108,102 +107,3 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac
return key
}
// ColumnIndices represents a set of column indices. This could be the set of indices that a node is required to custody,
// the set that a peer custodies, missing indices for a given block, indices that are present on disk, etc.
type ColumnIndices map[uint64]struct{}
// Has returns true if the index is present in the ColumnIndices.
func (ci ColumnIndices) Has(index uint64) bool {
_, ok := ci[index]
return ok
}
// Count returns the number of indices present in the ColumnIndices.
func (ci ColumnIndices) Count() int {
return len(ci)
}
// Set sets the index in the ColumnIndices.
func (ci ColumnIndices) Set(index uint64) {
ci[index] = struct{}{}
}
// Unset removes the index from the ColumnIndices.
func (ci ColumnIndices) Unset(index uint64) {
delete(ci, index)
}
// Copy creates a copy of the ColumnIndices.
func (ci ColumnIndices) Copy() ColumnIndices {
newCi := make(ColumnIndices, len(ci))
maps.Copy(newCi, ci)
return newCi
}
// Intersection returns a new ColumnIndices that contains only the indices that are present in both ColumnIndices.
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
result := make(ColumnIndices)
for index := range ci {
if other.Has(index) {
result.Set(index)
}
}
return result
}
// Merge mutates the receiver so that any index that is set in either of
// the two ColumnIndices is set in the receiver after the function finishes.
// It does not mutate the other ColumnIndices given as a function argument.
func (ci ColumnIndices) Merge(other ColumnIndices) {
for index := range other {
ci.Set(index)
}
}
// ToMap converts a ColumnIndices into a map[uint64]struct{}.
// In the future ColumnIndices may be changed to a bit map, so using
// ToMap will ensure forwards-compatibility.
func (ci ColumnIndices) ToMap() map[uint64]struct{} {
return ci.Copy()
}
// ToSlice converts a ColumnIndices into a slice of uint64 indices.
func (ci ColumnIndices) ToSlice() []uint64 {
indices := make([]uint64, 0, len(ci))
for index := range ci {
indices = append(indices, index)
}
return indices
}
// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
ci := make(ColumnIndices, len(indices))
for _, index := range indices {
ci[index] = struct{}{}
}
return ci
}
// NewColumnIndicesFromMap creates a ColumnIndices from a map[uint64]bool. This kind of map
// is used in several places in peerdas code. Converting from this map type to ColumnIndices
// will allow us to move ColumnIndices underlying type to a bitmap in the future and avoid
// lots of loops for things like intersections/unions or copies.
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
ci := make(ColumnIndices, len(indices))
for index, set := range indices {
if !set {
continue
}
ci[index] = struct{}{}
}
return ci
}
// NewColumnIndices creates an empty ColumnIndices.
// In the future ColumnIndices may change from a reference type to a value type,
// so using this constructor will ensure forwards-compatibility.
func NewColumnIndices() ColumnIndices {
return make(ColumnIndices)
}
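The removed ColumnIndices helper is a plain map-backed set. A short standalone sketch (the type is redefined locally so it compiles independently of the peerdas package) of how the set operations described above compose:

package main

import "fmt"

type ColumnIndices map[uint64]struct{}

func (ci ColumnIndices) Set(i uint64) { ci[i] = struct{}{} }

func (ci ColumnIndices) Has(i uint64) bool {
	_, ok := ci[i]
	return ok
}

// Intersection returns the indices present in both sets.
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
	out := make(ColumnIndices)
	for i := range ci {
		if other.Has(i) {
			out.Set(i)
		}
	}
	return out
}

func main() {
	custodied := ColumnIndices{0: {}, 2: {}, 4: {}} // columns this node stores (example values)
	missing := ColumnIndices{2: {}, 3: {}}          // columns a block still needs (example values)

	overlap := custodied.Intersection(missing)
	fmt.Println(overlap.Has(2), overlap.Has(3)) // true false
}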

View File

@@ -25,10 +25,3 @@ func TestInfo(t *testing.T) {
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
}
}
func TestNewColumnIndicesFromMap(t *testing.T) {
t.Run("nil map", func(t *testing.T) {
ci := peerdas.NewColumnIndicesFromMap(nil)
require.Equal(t, 0, ci.Count())
})
}

View File

@@ -33,7 +33,8 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// The sidecar index must be within the valid range.
if sidecar.Index >= fieldparams.NumberOfColumns {
numberOfColumns := params.BeaconConfig().NumberOfColumns
if sidecar.Index >= numberOfColumns {
return ErrIndexTooLarge
}

View File

@@ -100,7 +100,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
// Generate random KZG commitments `blobCount` blobs.
kzgCommitments := make([][]byte, blobCount)
for i := range blobCount {
for i := 0; i < blobCount; i++ {
kzgCommitments[i] = make([]byte, 48)
_, err := rand.Read(kzgCommitments[i])
require.NoError(t, err)
@@ -281,11 +281,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
}
func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
const (
blobCount = 12
numberOfColumns = fieldparams.NumberOfColumns
)
const blobCount = 12
numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
err := kzg.Start()
require.NoError(b, err)
@@ -390,10 +387,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.ROData
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
require.NoError(t, err)
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
rob, err := blocks.NewROBlock(sBlock)
require.NoError(t, err)
sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
return sidecars

View File

@@ -2,7 +2,6 @@ package peerdas
import (
"sort"
"sync"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -26,114 +25,7 @@ var (
func MinimumColumnCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
return (fieldparams.NumberOfColumns + 1) / 2
}
// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
// custody enough data columns for reconstruction. This accounts for the relationship between
// custody groups and columns, making it future-proof if these values change.
// Returns an error if the configuration values are invalid (zero or would cause division by zero).
func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
const numberOfColumns = fieldparams.NumberOfColumns
cfg := params.BeaconConfig()
// Validate configuration values
if numberOfColumns == 0 {
return 0, errors.New("NumberOfColumns cannot be zero")
}
if cfg.NumberOfCustodyGroups == 0 {
return 0, errors.New("NumberOfCustodyGroups cannot be zero")
}
minimumColumnCount := MinimumColumnCountToReconstruct()
// Calculate how many columns each custody group represents
columnsPerGroup := numberOfColumns / cfg.NumberOfCustodyGroups
// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
// for reconstruction purposes as we cannot determine a meaningful custody group count
if columnsPerGroup == 0 {
return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
cfg.NumberOfCustodyGroups, numberOfColumns)
}
// Use ceiling division to ensure we have enough groups to cover the minimum columns
// ceiling(a/b) = (a + b - 1) / b
return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
}
// recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.
// This is optimized to only recover cells without computing proofs.
// Returns a map from blob index to recovered cells.
func recoverCellsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blobIndices []int) (map[int][]kzg.Cell, error) {
sidecarCount := len(verifiedRoSidecars)
var wg errgroup.Group
cellsPerBlob := make(map[int][]kzg.Cell, len(blobIndices))
var mu sync.Mutex
for _, blobIndex := range blobIndices {
wg.Go(func() error {
cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index)
}
recoveredCells, err := kzg.RecoverCells(cellsIndices, cells)
if err != nil {
return errors.Wrapf(err, "recover cells for blob %d", blobIndex)
}
mu.Lock()
cellsPerBlob[blobIndex] = recoveredCells
mu.Unlock()
return nil
})
}
if err := wg.Wait(); err != nil {
return nil, errors.Wrap(err, "wait for RecoverCells")
}
return cellsPerBlob, nil
}
// recoverCellsAndProofsForBlobs reconstructs both cells and proofs for specified blobs from the given data column sidecars.
func recoverCellsAndProofsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blobIndices []int) ([][]kzg.Cell, [][]kzg.Proof, error) {
sidecarCount := len(verifiedRoSidecars)
var wg errgroup.Group
cellsPerBlob := make([][]kzg.Cell, len(blobIndices))
proofsPerBlob := make([][]kzg.Proof, len(blobIndices))
for i, blobIndex := range blobIndices {
wg.Go(func() error {
cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index)
}
recoveredCells, recoveredProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
if err != nil {
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex)
}
cellsPerBlob[i] = recoveredCells
proofsPerBlob[i] = recoveredProofs
return nil
})
}
if err := wg.Wait(); err != nil {
return nil, nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
}
return cellsPerBlob, proofsPerBlob, nil
return (params.BeaconConfig().NumberOfColumns + 1) / 2
}
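As a quick check of the arithmetic above: (numberOfColumns + 1) / 2 in integer division gives 64 for 128 columns, 65 for 129, and 65 for 130, matching the test table later in this diff; the removed custody-group variant applies the same ceiling-division trick, ceiling(a/b) = (a + b - 1) / b. A standalone sketch with assumed example values (mainnet uses 128 columns and 128 custody groups):

package main

import "fmt"

func minColumns(numberOfColumns uint64) uint64 {
	// Half of the extended columns, rounded up.
	return (numberOfColumns + 1) / 2
}

func minCustodyGroups(numberOfColumns, numberOfCustodyGroups uint64) uint64 {
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
	// ceiling(a/b) = (a + b - 1) / b
	return (minColumns(numberOfColumns) + columnsPerGroup - 1) / columnsPerGroup
}

func main() {
	fmt.Println(minColumns(128), minColumns(129), minColumns(130)) // 64 65 65

	// With 128 columns and 128 custody groups each group maps to one column,
	// so 64 groups cover the 64-column reconstruction minimum.
	fmt.Println(minCustodyGroups(128, 128)) // 64
}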
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
@@ -174,16 +66,38 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
})
// Recover cells and compute proofs in parallel.
blobIndices := make([]int, blobCount)
for i := range blobIndices {
blobIndices[i] = i
}
cellsPerBlob, proofsPerBlob, err := recoverCellsAndProofsForBlobs(verifiedRoSidecars, blobIndices)
if err != nil {
return nil, errors.Wrap(err, "recover cells and proofs for blobs")
var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
for blobIndex := range uint64(blobCount) {
wg.Go(func() error {
cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index)
}
// Recover the cells and proofs for the corresponding blob
cellsAndProofsForBlob, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
if err != nil {
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex)
}
// It is safe for multiple goroutines to concurrently write to the same slice,
// as long as they are writing to different indices, which is the case here.
cellsAndProofs[blobIndex] = cellsAndProofsForBlob
return nil
})
}
outSidecars, err := DataColumnSidecars(cellsPerBlob, proofsPerBlob, PopulateFromSidecar(referenceSidecar))
if err := wg.Wait(); err != nil {
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
}
outSidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar))
if err != nil {
return nil, errors.Wrap(err, "data column sidecars from items")
}
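The comment in the hunk above leans on the fact that each goroutine writes to its own slot of cellsAndProofs, so no mutex is required (the removed recoverCellsForBlobs wrote into a shared map and therefore did need one). A minimal standalone sketch of the pattern, assuming Go 1.22+ per-iteration loop variables and golang.org/x/sync/errgroup:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	results := make([]int, 8)

	var wg errgroup.Group
	for i := range results {
		wg.Go(func() error {
			// Each goroutine writes only to its own index, so no locking is needed.
			results[i] = i * i
			return nil
		})
	}
	if err := wg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(results) // [0 1 4 9 16 25 36 49]
}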
@@ -199,10 +113,18 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
return reconstructedVerifiedRoSidecars, nil
}
// reconstructIfNeeded validates the input data column sidecars and returns the prepared sidecars
// (reconstructed if necessary). This function performs common validation and reconstruction logic used by
// both ReconstructBlobs and ReconstructBlobSidecars.
func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
// ReconstructBlobs constructs verified read only blob sidecars from verified read only data column sidecars.
// The following constraints must be satisfied:
// - All `dataColumnSidecars` must be committed to the same block, and
// - `dataColumnSidecars` must be sorted by index and should not contain duplicates.
// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
// or enough sidecars to reconstruct the blobs.
func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
// Return early if no blobs are requested.
if len(indices) == 0 {
return nil, nil
}
if len(verifiedDataColumnSidecars) == 0 {
return nil, ErrNotEnoughDataColumnSidecars
}
@@ -224,34 +146,6 @@ func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColum
return nil, ErrNotEnoughDataColumnSidecars
}
// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
return verifiedDataColumnSidecars, nil
}
// We need to reconstruct the data column sidecars.
return ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
}
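Because the sidecars are already validated to be sorted by index with no duplicates, checking that position cellsPerBlob-1 holds index cellsPerBlob-1 is enough to conclude that every index in 0..cellsPerBlob-1 is present (any gap would push a larger index into that position). A tiny standalone sketch of the predicate:

package main

import "fmt"

// hasAllNonExtended reports whether a sorted, de-duplicated list of column
// indices contains every index in [0, cellsPerBlob).
func hasAllNonExtended(sortedIndices []uint64, cellsPerBlob int) bool {
	if len(sortedIndices) < cellsPerBlob {
		return false
	}
	// With sorted, distinct indices, index cellsPerBlob-1 can only sit at
	// position cellsPerBlob-1 if every smaller index appears before it.
	return sortedIndices[cellsPerBlob-1] == uint64(cellsPerBlob-1)
}

func main() {
	fmt.Println(hasAllNonExtended([]uint64{0, 1, 2, 3}, 4))     // true
	fmt.Println(hasAllNonExtended([]uint64{0, 1, 3, 4}, 4))     // false: index 2 is missing
	fmt.Println(hasAllNonExtended([]uint64{0, 1, 2, 3, 64}, 4)) // true: extra extended columns are fine
}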
// ReconstructBlobSidecars constructs verified read only blob sidecars from verified read only data column sidecars.
// The following constraints must be satisfied:
// - All `dataColumnSidecars` must be committed to the same block, and
// - `dataColumnSidecars` must be sorted by index and should not contain duplicates.
// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
// - or enough sidecars to reconstruct the blobs.
func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
// Return early if no blobs are requested.
if len(indices) == 0 {
return nil, nil
}
// Validate and prepare data columns (reconstruct if necessary).
// This also checks if input is empty.
preparedDataColumnSidecars, err := reconstructIfNeeded(verifiedDataColumnSidecars)
if err != nil {
return nil, err
}
// Check if the blob index is too high.
commitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
@@ -265,8 +159,8 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []
}
// Check if the data column sidecars are aligned with the block.
dataColumnSidecars := make([]blocks.RODataColumn, 0, len(preparedDataColumnSidecars))
for _, verifiedDataColumnSidecar := range preparedDataColumnSidecars {
dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
}
@@ -275,8 +169,25 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []
return nil, errors.Wrap(err, "data columns align with block")
}
// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
// Convert verified data column sidecars to verified blob sidecars.
blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, verifiedDataColumnSidecars, indices)
if err != nil {
return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
}
return blobSidecars, nil
}
// We need to reconstruct the data column sidecars.
reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
if err != nil {
return nil, errors.Wrap(err, "reconstruct data column sidecars")
}
// Convert verified data column sidecars to verified blob sidecars.
blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, preparedDataColumnSidecars, indices)
blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, reconstructedDataColumnSidecars, indices)
if err != nil {
return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
}
@@ -285,190 +196,86 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []
}
// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and flattened cell proofs.
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
const numberOfColumns = fieldparams.NumberOfColumns
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))
cellsCount := blobCount * numberOfColumns
if cellsCount != cellProofsCount {
return nil, nil, ErrBlobsCellsProofsMismatch
return nil, ErrBlobsCellsProofsMismatch
}
cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
return nil, errors.New("wrong blob size - should never happen")
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
return nil, errors.Wrap(err, "compute cells")
}
var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
return nil, errors.New("wrong KZG proof size - should never happen")
}
proofs = append(proofs, kzgProof)
}
cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, proofs)
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}
return cellsPerBlob, proofsPerBlob, nil
return cellsAndProofs, nil
}
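The flat layout consumed above stores all cell proofs for blob 0, then all proofs for blob 1, and so on, so blob i owns flat indices i*numberOfColumns through (i+1)*numberOfColumns - 1. A standalone sketch of that index arithmetic with a small assumed column count:

package main

import "fmt"

func main() {
	const numberOfColumns = 4 // assumed toy value; mainnet uses 128
	const blobCount = 3

	// Flat slice: proofs for blob 0, then blob 1, then blob 2.
	flat := make([]string, blobCount*numberOfColumns)
	for i := 0; i < blobCount; i++ {
		for j := 0; j < numberOfColumns; j++ {
			flat[i*numberOfColumns+j] = fmt.Sprintf("blob%d/proof%d", i, j)
		}
	}

	// Recover blob 1's proofs using the same bounds as the loop above.
	i := 1
	for idx := i * numberOfColumns; idx < (i+1)*numberOfColumns; idx++ {
		fmt.Println(flat[idx]) // blob1/proof0 .. blob1/proof3
	}
}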
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([]kzg.CellsAndProofs, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
return nil, ErrNilBlobAndProof
}
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
return nil, errors.New("wrong blob size - should never happen")
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
return nil, errors.Wrap(err, "compute cells")
}
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
return nil, errors.New("wrong KZG proof size - should never happen")
}
var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
return nil, errors.New("wrong copied KZG proof size - should never happen")
}
kzgProofs = append(kzgProofs, kzgProof)
}
cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, kzgProofs)
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: kzgProofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}
return cellsPerBlob, proofsPerBlob, nil
}
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.
// This is an optimized version for when only the blob data is needed (e.g., for the GetBlobs endpoint).
// The following constraints must be satisfied:
// - All `dataColumnSidecars` must be committed to the same block, and
// - `dataColumnSidecars` must be sorted by index and should not contain duplicates.
// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
// - or enough sidecars to reconstruct the blobs.
func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int, blobCount int) ([][]byte, error) {
// If no specific indices are requested, populate with all blob indices.
if len(indices) == 0 {
indices = make([]int, blobCount)
for i := range indices {
indices[i] = i
}
}
if len(verifiedDataColumnSidecars) == 0 {
return nil, ErrNotEnoughDataColumnSidecars
}
// Check if the sidecars are sorted by index and do not contain duplicates.
previousColumnIndex := verifiedDataColumnSidecars[0].Index
for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
columnIndex := dataColumnSidecar.Index
if columnIndex <= previousColumnIndex {
return nil, ErrDataColumnSidecarsNotSortedByIndex
}
previousColumnIndex = columnIndex
}
// Check if we have enough columns.
cellsPerBlob := fieldparams.CellsPerBlob
if len(verifiedDataColumnSidecars) < cellsPerBlob {
return nil, ErrNotEnoughDataColumnSidecars
}
// Verify that the actual blob count from the first sidecar matches the expected count
referenceSidecar := verifiedDataColumnSidecars[0]
actualBlobCount := len(referenceSidecar.Column)
if actualBlobCount != blobCount {
return nil, errors.Errorf("blob count mismatch: expected %d, got %d", blobCount, actualBlobCount)
}
// Check if the blob index is too high.
for _, blobIndex := range indices {
if blobIndex >= blobCount {
return nil, ErrBlobIndexTooHigh
}
}
// Check if all columns have the same length and are committed to the same block.
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range verifiedDataColumnSidecars[1:] {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
if sidecar.BlockRoot() != blockRoot {
return nil, ErrBlockRootMismatch
}
}
// Check if we have all non-extended columns (0..63) - if so, no reconstruction needed.
hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1)
var reconstructedCells map[int][]kzg.Cell
if !hasAllNonExtendedColumns {
// Need to reconstruct cells (but NOT proofs) for the requested blobs only.
var err error
reconstructedCells, err = recoverCellsForBlobs(verifiedDataColumnSidecars, indices)
if err != nil {
return nil, errors.Wrap(err, "recover cells")
}
}
// Extract blob data without computing proofs.
blobs := make([][]byte, 0, len(indices))
for _, blobIndex := range indices {
var blob kzg.Blob
// Compute the content of the blob.
for columnIndex := range cellsPerBlob {
var cell []byte
if hasAllNonExtendedColumns {
// Use existing cells from sidecars
cell = verifiedDataColumnSidecars[columnIndex].Column[blobIndex]
} else {
// Use reconstructed cells
cell = reconstructedCells[blobIndex][columnIndex][:]
}
if copy(blob[kzg.BytesPerCell*columnIndex:], cell) != kzg.BytesPerCell {
return nil, errors.New("wrong cell size - should never happen")
}
}
blobs = append(blobs, blob[:])
}
return blobs, nil
return cellsAndProofs, nil
}
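The removed ReconstructBlobs body rebuilds each blob by concatenating its first cellsPerBlob cells (the non-extended half), placing cell j at byte offset j*BytesPerCell. A standalone sketch of that assembly with assumed toy sizes:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Assumed toy sizes; in the code above these come from fieldparams/kzg constants.
	const bytesPerCell = 4
	const cellsPerBlob = 3

	// Cells of one blob, in column order.
	cells := [][]byte{
		{1, 1, 1, 1},
		{2, 2, 2, 2},
		{3, 3, 3, 3},
	}

	// Concatenate the non-extended cells into the blob buffer.
	blob := make([]byte, bytesPerCell*cellsPerBlob)
	for columnIndex, cell := range cells {
		if copy(blob[bytesPerCell*columnIndex:], cell) != bytesPerCell {
			panic("wrong cell size - should never happen")
		}
	}

	fmt.Println(bytes.Equal(blob, []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3})) // true
}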
// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.

View File

@@ -1,79 +0,0 @@
package peerdas_test
// Test helpers for reconstruction tests
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
// testBlobSetup holds common test data for blob reconstruction tests.
type testBlobSetup struct {
blobCount int
blobs []kzg.Blob
roBlock blocks.ROBlock
roDataColumnSidecars []blocks.RODataColumn
verifiedRoDataColumnSidecars []blocks.VerifiedRODataColumn
}
// setupTestBlobs creates a complete test setup with blobs, cells, proofs, and data column sidecars.
func setupTestBlobs(t *testing.T, blobCount int) *testBlobSetup {
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 42, blobCount)
blobs := make([]kzg.Blob, blobCount)
for i := range blobCount {
copy(blobs[i][:], roBlobSidecars[i].Blob)
}
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs))
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
verifiedRoSidecars := toVerifiedSidecars(roDataColumnSidecars)
return &testBlobSetup{
blobCount: blobCount,
blobs: blobs,
roBlock: roBlock,
roDataColumnSidecars: roDataColumnSidecars,
verifiedRoDataColumnSidecars: verifiedRoSidecars,
}
}
// toVerifiedSidecars converts a slice of RODataColumn to VerifiedRODataColumn.
func toVerifiedSidecars(roDataColumnSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
for _, roDataColumnSidecar := range roDataColumnSidecars {
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
}
return verifiedRoSidecars
}
// filterEvenIndexedSidecars returns only the even-indexed sidecars (0, 2, 4, ...).
// This is useful for forcing reconstruction in tests.
func filterEvenIndexedSidecars(sidecars []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
filtered := make([]blocks.VerifiedRODataColumn, 0, len(sidecars)/2)
for i := 0; i < len(sidecars); i += 2 {
filtered = append(filtered, sidecars[i])
}
return filtered
}
// setupFuluForkEpoch sets up the test configuration with Fulu fork after Electra.
func setupFuluForkEpoch(t *testing.T) primitives.Slot {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
return util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
}

View File

@@ -17,9 +17,41 @@ import (
)
func TestMinimumColumnsCountToReconstruct(t *testing.T) {
const expected = uint64(64)
actual := peerdas.MinimumColumnCountToReconstruct()
require.Equal(t, expected, actual)
testCases := []struct {
name string
numberOfColumns uint64
expected uint64
}{
{
name: "numberOfColumns=128",
numberOfColumns: 128,
expected: 64,
},
{
name: "numberOfColumns=129",
numberOfColumns: 129,
expected: 65,
},
{
name: "numberOfColumns=130",
numberOfColumns: 130,
expected: 65,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Set the total number of columns.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.NumberOfColumns = tc.numberOfColumns
params.OverrideBeaconConfig(cfg)
// Compute the minimum number of columns needed to reconstruct.
actual := peerdas.MinimumColumnCountToReconstruct()
require.Equal(t, tc.expected, actual)
})
}
}
func TestReconstructDataColumnSidecars(t *testing.T) {
@@ -92,7 +124,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
})
}
func TestReconstructBlobSidecars(t *testing.T) {
func TestReconstructBlobs(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
@@ -101,13 +133,13 @@ func TestReconstructBlobSidecars(t *testing.T) {
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
t.Run("no index", func(t *testing.T) {
actual, err := peerdas.ReconstructBlobSidecars(emptyBlock, nil, nil)
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
require.NoError(t, err)
require.IsNil(t, actual)
})
t.Run("empty input", func(t *testing.T) {
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, nil, []int{0})
_, err := peerdas.ReconstructBlobs(emptyBlock, nil, []int{0})
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})
@@ -117,7 +149,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
// Arbitrarily change the order of the sidecars.
verifiedRoSidecars[3], verifiedRoSidecars[2] = verifiedRoSidecars[2], verifiedRoSidecars[3]
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, verifiedRoSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
@@ -127,7 +159,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
// [0, 1, 1, 3, 4, ...]
verifiedRoSidecars[2] = verifiedRoSidecars[1]
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, verifiedRoSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
@@ -137,7 +169,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
// [0, 1, 2, 1, 4, ...]
verifiedRoSidecars[3] = verifiedRoSidecars[1]
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, verifiedRoSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
@@ -145,7 +177,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
inputSidecars := verifiedRoSidecars[:fieldparams.CellsPerBlob-1]
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, inputSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(emptyBlock, inputSidecars, []int{0})
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})
@@ -154,7 +186,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
_, err := peerdas.ReconstructBlobSidecars(roBlock, verifiedRoSidecars, []int{1, blobCount})
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{1, blobCount})
require.ErrorIs(t, err, peerdas.ErrBlobIndexTooHigh)
})
@@ -162,20 +194,20 @@ func TestReconstructBlobSidecars(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))
_, err := peerdas.ReconstructBlobSidecars(roBlock, verifiedRoSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
})
t.Run("nominal", func(t *testing.T) {
const blobCount = 3
numberOfColumns := params.BeaconConfig().NumberOfColumns
roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
// Compute cells and proofs from blob sidecars.
var wg errgroup.Group
blobs := make([][]byte, blobCount)
inputCellsPerBlob := make([][]kzg.Cell, blobCount)
inputProofsPerBlob := make([][]kzg.Proof, blobCount)
inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
for i := range blobCount {
blob := roBlobSidecars[i].Blob
blobs[i] = blob
@@ -185,15 +217,14 @@ func TestReconstructBlobSidecars(t *testing.T) {
count := copy(kzgBlob[:], blob)
require.Equal(t, len(kzgBlob), count)
cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
if err != nil {
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
}
// It is safe for multiple goroutines to concurrently write to the same slice,
// as long as they are writing to different indices, which is the case here.
inputCellsPerBlob[i] = cells
inputProofsPerBlob[i] = proofs
inputCellsAndProofs[i] = cp
return nil
})
@@ -203,19 +234,19 @@ func TestReconstructBlobSidecars(t *testing.T) {
require.NoError(t, err)
// Flatten proofs.
cellProofs := make([][]byte, 0, blobCount*fieldparams.NumberOfColumns)
for _, proofs := range inputProofsPerBlob {
for _, proof := range proofs {
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
for _, cp := range inputCellsAndProofs {
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
require.NoError(t, err)
// Construct data column sidecars from the signed block and cells and proofs.
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
// Convert to verified data column sidecars.
@@ -229,7 +260,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
t.Run("no reconstruction needed", func(t *testing.T) {
// Reconstruct blobs.
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobSidecars(roBlock, verifiedRoSidecars, indices)
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, indices)
require.NoError(t, err)
// Compare blobs.
@@ -249,7 +280,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
}
// Reconstruct blobs.
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobSidecars(roBlock, filteredSidecars, indices)
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(roBlock, filteredSidecars, indices)
require.NoError(t, err)
// Compare blobs.
@@ -265,162 +296,34 @@ func TestReconstructBlobSidecars(t *testing.T) {
}
func TestReconstructBlobs(t *testing.T) {
setupFuluForkEpoch(t)
require.NoError(t, kzg.Start())
t.Run("empty indices with blobCount > 0", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Call with empty indices - should return all blobs
reconstructedBlobs, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{}, setup.blobCount)
require.NoError(t, err)
require.Equal(t, setup.blobCount, len(reconstructedBlobs))
// Verify each blob matches
for i := 0; i < setup.blobCount; i++ {
require.DeepEqual(t, setup.blobs[i][:], reconstructedBlobs[i])
}
})
t.Run("specific indices", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Request only blobs at indices 0 and 2
indices := []int{0, 2}
reconstructedBlobs, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, indices, setup.blobCount)
require.NoError(t, err)
require.Equal(t, len(indices), len(reconstructedBlobs))
// Verify requested blobs match
for i, blobIndex := range indices {
require.DeepEqual(t, setup.blobs[blobIndex][:], reconstructedBlobs[i])
}
})
t.Run("blob count mismatch", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Pass wrong blob count
wrongBlobCount := 5
_, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{0}, wrongBlobCount)
require.ErrorContains(t, "blob count mismatch", err)
})
t.Run("empty data columns", func(t *testing.T) {
_, err := peerdas.ReconstructBlobs([]blocks.VerifiedRODataColumn{}, []int{0}, 1)
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})
t.Run("index too high", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Request blob index that's too high
_, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{setup.blobCount}, setup.blobCount)
require.ErrorIs(t, err, peerdas.ErrBlobIndexTooHigh)
})
t.Run("not enough columns", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Only provide 63 columns (need at least 64)
inputSidecars := setup.verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1]
_, err := peerdas.ReconstructBlobs(inputSidecars, []int{0}, setup.blobCount)
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})
t.Run("not sorted", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Swap two sidecars to make them unsorted
setup.verifiedRoDataColumnSidecars[3], setup.verifiedRoDataColumnSidecars[2] = setup.verifiedRoDataColumnSidecars[2], setup.verifiedRoDataColumnSidecars[3]
_, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{0}, setup.blobCount)
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
t.Run("with reconstruction needed", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Keep only even-indexed columns (will need reconstruction)
filteredSidecars := filterEvenIndexedSidecars(setup.verifiedRoDataColumnSidecars)
// Reconstruct all blobs
reconstructedBlobs, err := peerdas.ReconstructBlobs(filteredSidecars, []int{}, setup.blobCount)
require.NoError(t, err)
require.Equal(t, setup.blobCount, len(reconstructedBlobs))
// Verify all blobs match
for i := range setup.blobCount {
require.DeepEqual(t, setup.blobs[i][:], reconstructedBlobs[i])
}
})
t.Run("no reconstruction needed - all non-extended columns present", func(t *testing.T) {
setup := setupTestBlobs(t, 3)
// Use all columns (no reconstruction needed since we have all non-extended columns 0-63)
reconstructedBlobs, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{1}, setup.blobCount)
require.NoError(t, err)
require.Equal(t, 1, len(reconstructedBlobs))
// Verify blob matches
require.DeepEqual(t, setup.blobs[1][:], reconstructedBlobs[0])
})
t.Run("reconstruct only requested blob indices", func(t *testing.T) {
// This test verifies the optimization: when reconstruction is needed and specific
// blob indices are requested, we only reconstruct those blobs, not all of them.
setup := setupTestBlobs(t, 6)
// Keep only even-indexed columns (will need reconstruction)
// This ensures we don't have all non-extended columns (0-63)
filteredSidecars := filterEvenIndexedSidecars(setup.verifiedRoDataColumnSidecars)
// Request only specific blob indices (not all of them)
requestedIndices := []int{1, 3, 5}
reconstructedBlobs, err := peerdas.ReconstructBlobs(filteredSidecars, requestedIndices, setup.blobCount)
require.NoError(t, err)
// Should only get the requested blobs back (not all 6)
require.Equal(t, len(requestedIndices), len(reconstructedBlobs),
"should only reconstruct requested blobs, not all blobs")
// Verify each requested blob matches the original
for i, blobIndex := range requestedIndices {
require.DeepEqual(t, setup.blobs[blobIndex][:], reconstructedBlobs[i],
"blob at index %d should match", blobIndex)
}
})
}
func TestComputeCellsAndProofsFromFlat(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
t.Run("mismatched blob and proof counts", func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Create one blob but proofs for two blobs
blobs := [][]byte{{}}
// Create proofs for 2 blobs worth of columns
cellProofs := make([][]byte, 2*numberOfColumns)
_, _, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
_, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
})
t.Run("nominal", func(t *testing.T) {
const blobCount = 2
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Generate test blobs
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
// Extract blobs and compute expected cells and proofs
blobs := make([][]byte, blobCount)
expectedCellsPerBlob := make([][]kzg.Cell, blobCount)
expectedProofsPerBlob := make([][]kzg.Proof, blobCount)
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
var wg errgroup.Group
for i := range blobCount {
@@ -432,13 +335,12 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
count := copy(kzgBlob[:], blob)
require.Equal(t, len(kzgBlob), count)
cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
if err != nil {
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
}
expectedCellsPerBlob[i] = cells
expectedProofsPerBlob[i] = proofs
expectedCellsAndProofs[i] = cp
return nil
})
}
@@ -448,30 +350,30 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
// Flatten proofs
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
for _, proofs := range expectedProofsPerBlob {
for _, proof := range proofs {
for _, cp := range expectedCellsAndProofs {
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}
// Test ComputeCellsAndProofs
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsPerBlob))
require.Equal(t, blobCount, len(actualCellsAndProofs))
// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i]))
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
// Compare cells
for j, expectedCell := range expectedCellsPerBlob[i] {
require.Equal(t, expectedCell, actualCellsPerBlob[i][j])
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
}
// Compare proofs
for j, expectedProof := range expectedProofsPerBlob[i] {
require.Equal(t, expectedProof, actualProofsPerBlob[i][j])
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
}
}
})
@@ -479,7 +381,7 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
t.Run("nil blob and proof", func(t *testing.T) {
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
_, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
})
@@ -495,8 +397,7 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
// Extract blobs and compute expected cells and proofs
blobsAndProofs := make([]*pb.BlobAndProofV2, blobCount)
expectedCellsPerBlob := make([][]kzg.Cell, blobCount)
expectedProofsPerBlob := make([][]kzg.Proof, blobCount)
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
var wg errgroup.Group
for i := range blobCount {
@@ -507,15 +408,14 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
count := copy(kzgBlob[:], blob)
require.Equal(t, len(kzgBlob), count)
cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
if err != nil {
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
}
expectedCellsPerBlob[i] = cells
expectedProofsPerBlob[i] = proofs
expectedCellsAndProofs[i] = cellsAndProofs
kzgProofs := make([][]byte, 0, len(proofs))
for _, proof := range proofs {
kzgProofs := make([][]byte, 0, len(cellsAndProofs.Proofs))
for _, proof := range cellsAndProofs.Proofs {
kzgProofs = append(kzgProofs, proof[:])
}
@@ -533,24 +433,24 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
require.NoError(t, err)
// Test ComputeCellsAndProofs
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsPerBlob))
require.Equal(t, blobCount, len(actualCellsAndProofs))
// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), cap(actualProofsPerBlob[i]))
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), cap(actualCellsAndProofs[i].Proofs))
// Compare cells
for j, expectedCell := range expectedCellsPerBlob[i] {
require.Equal(t, expectedCell, actualCellsPerBlob[i][j])
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
}
// Compare proofs
for j, expectedProof := range expectedProofsPerBlob[i] {
require.Equal(t, expectedProof, actualProofsPerBlob[i][j])
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
}
}
})
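The two tests above exercise the consolidated return type: each blob now yields a single kzg.CellsAndProofs value instead of two parallel cell/proof slices. A minimal sketch of that per-blob flow, mirroring the imports and types visible in this diff (kzg.Blob is assumed to be the kzg package's fixed-size blob array; error handling is abbreviated):

```go
package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
	"github.com/pkg/errors"
)

// cellsAndFlatProofs computes cells and proofs for every blob and also flattens
// the proofs to [][]byte, the shape ComputeCellsAndProofsFromFlat expects.
func cellsAndFlatProofs(blobs []kzg.Blob) ([]kzg.CellsAndProofs, [][]byte, error) {
	rows := make([]kzg.CellsAndProofs, len(blobs))
	var flatProofs [][]byte
	for i := range blobs {
		cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i])
		if err != nil {
			return nil, nil, errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
		}
		rows[i] = cp
		for _, proof := range cp.Proofs {
			flatProofs = append(flatProofs, proof[:])
		}
	}
	return rows, flatProofs, nil
}
```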

View File

@@ -1,137 +0,0 @@
package peerdas
import (
"testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/ethereum/go-ethereum/p2p/enode"
)
func TestSemiSupernodeCustody(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = 128
params.OverrideBeaconConfig(cfg)
// Create a test node ID
nodeID := enode.ID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32})
t.Run("semi-supernode custodies exactly 64 columns", func(t *testing.T) {
// Semi-supernode uses 64 custody groups (half of 128)
const semiSupernodeCustodyGroupCount = 64
// Get custody groups for semi-supernode
custodyGroups, err := CustodyGroups(nodeID, semiSupernodeCustodyGroupCount)
require.NoError(t, err)
require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyGroups))
// Verify we get exactly 64 custody columns
custodyColumns, err := CustodyColumns(custodyGroups)
require.NoError(t, err)
require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyColumns))
// Verify the columns are valid (within 0-127 range)
for columnIndex := range custodyColumns {
if columnIndex >= numberOfColumns {
t.Fatalf("Invalid column index %d, should be less than %d", columnIndex, numberOfColumns)
}
}
})
t.Run("64 columns is exactly the minimum for reconstruction", func(t *testing.T) {
minimumCount := MinimumColumnCountToReconstruct()
require.Equal(t, uint64(64), minimumCount)
})
t.Run("semi-supernode vs supernode custody", func(t *testing.T) {
// Semi-supernode (64 custody groups)
semiSupernodeGroups, err := CustodyGroups(nodeID, 64)
require.NoError(t, err)
semiSupernodeColumns, err := CustodyColumns(semiSupernodeGroups)
require.NoError(t, err)
// Supernode (128 custody groups = all groups)
supernodeGroups, err := CustodyGroups(nodeID, 128)
require.NoError(t, err)
supernodeColumns, err := CustodyColumns(supernodeGroups)
require.NoError(t, err)
// Verify semi-supernode has exactly half the columns of supernode
require.Equal(t, 64, len(semiSupernodeColumns))
require.Equal(t, 128, len(supernodeColumns))
require.Equal(t, len(supernodeColumns)/2, len(semiSupernodeColumns))
// Verify all semi-supernode columns are a subset of supernode columns
for columnIndex := range semiSupernodeColumns {
if !supernodeColumns[columnIndex] {
t.Fatalf("Semi-supernode column %d not found in supernode columns", columnIndex)
}
}
})
}
func TestMinimumCustodyGroupCountToReconstruct(t *testing.T) {
tests := []struct {
name string
numberOfGroups uint64
expectedResult uint64
}{
{
name: "Standard 1:1 ratio (128 columns, 128 groups)",
numberOfGroups: 128,
expectedResult: 64, // Need half of 128 groups
},
{
name: "2 columns per group (128 columns, 64 groups)",
numberOfGroups: 64,
expectedResult: 32, // Need 64 columns, which is 32 groups (64/2)
},
{
name: "4 columns per group (128 columns, 32 groups)",
numberOfGroups: 32,
expectedResult: 16, // Need 64 columns, which is 16 groups (64/4)
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = tt.numberOfGroups
params.OverrideBeaconConfig(cfg)
result, err := MinimumCustodyGroupCountToReconstruct()
require.NoError(t, err)
require.Equal(t, tt.expectedResult, result)
})
}
}
func TestMinimumCustodyGroupCountToReconstruct_ErrorCases(t *testing.T) {
t.Run("Returns error when NumberOfCustodyGroups is zero", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = 0
params.OverrideBeaconConfig(cfg)
_, err := MinimumCustodyGroupCountToReconstruct()
require.NotNil(t, err)
require.Equal(t, true, err.Error() == "NumberOfCustodyGroups cannot be zero")
})
t.Run("Returns error when NumberOfCustodyGroups exceeds NumberOfColumns", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = 256
params.OverrideBeaconConfig(cfg)
_, err := MinimumCustodyGroupCountToReconstruct()
require.NotNil(t, err)
// Just check that we got an error about the configuration
require.Equal(t, true, len(err.Error()) > 0)
})
}
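The deleted file above encodes the reconstruction arithmetic in its expectations: 64 of the 128 columns suffice to rebuild the rest, so the minimum custody-group count is 64 divided by the columns carried per group. A hypothetical helper (not a function from the package) that reproduces the 64/32/16 expectations in those table cases:

```go
package example

// minimumGroupsToReconstruct is illustrative arithmetic only.
func minimumGroupsToReconstruct(numberOfColumns, numberOfCustodyGroups uint64) uint64 {
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups // e.g. 128/64 = 2
	columnsNeeded := numberOfColumns / 2                       // half the columns can rebuild the rest
	return columnsNeeded / columnsPerGroup                     // 128 groups -> 64, 64 -> 32, 32 -> 16
}
```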

View File

@@ -93,22 +93,19 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
}
// DataColumnSidecars given ConstructionPopulator and the cells/proofs associated with each blob in the
// DataColumnSidecars, given ConstructionPopulator and the cells/proofs associated with each blob in the
// block, assembles sidecars which can be distributed to peers.
// cellsPerBlob and proofsPerBlob are parallel slices where each index represents a blob sidecar.
// This is an adapted version of
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars,
// which is designed to be used both when constructing sidecars from a block and from a sidecar, replacing
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
if len(cellsPerBlob) == 0 {
func DataColumnSidecars(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
if len(rows) == 0 {
return nil, nil
}
start := time.Now()
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
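A sketch of the call shape after this signature change, matching how the tests later in this diff invoke it; the rows are assumed to already be consistent with the block's blob commitments:

```go
package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// buildSidecars hands one kzg.CellsAndProofs entry per blob to DataColumnSidecars,
// together with a populator derived from the read-only block.
func buildSidecars(rob blocks.ROBlock, rows []kzg.CellsAndProofs) ([]blocks.RODataColumn, error) {
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	for _, row := range rows {
		// Rows that do not cover every column are rejected downstream as well.
		if uint64(len(row.Cells)) != numberOfColumns || uint64(len(row.Proofs)) != numberOfColumns {
			return nil, peerdas.ErrNotEnoughDataColumnSidecars
		}
	}
	return peerdas.DataColumnSidecars(rows, peerdas.PopulateFromBlock(rob))
}
```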
@@ -117,8 +114,9 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return nil, errors.Wrap(err, "extract block info")
}
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
maxIdx := params.BeaconConfig().NumberOfColumns
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
for idx := range maxIdx {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
@@ -199,31 +197,26 @@ func (b *BlockReconstructionSource) extract() (*blockInfo, error) {
// rotateRowsToCols takes a 2D slice of cells and proofs, where the x is rows (blobs) and y is columns,
// and returns a 2D slice where x is columns and y is rows.
func rotateRowsToCols(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, numCols uint64) ([][][]byte, [][][]byte, error) {
if len(cellsPerBlob) == 0 {
func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) {
if len(rows) == 0 {
return nil, nil, nil
}
if len(cellsPerBlob) != len(proofsPerBlob) {
return nil, nil, errors.New("cells and proofs length mismatch")
}
cellCols := make([][][]byte, numCols)
proofCols := make([][][]byte, numCols)
for i := range cellsPerBlob {
cells := cellsPerBlob[i]
proofs := proofsPerBlob[i]
if uint64(len(cells)) != numCols {
for i, cp := range rows {
if uint64(len(cp.Cells)) != numCols {
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells")
}
if len(cells) != len(proofs) {
if len(cp.Cells) != len(cp.Proofs) {
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
}
for j := range numCols {
for j := uint64(0); j < numCols; j++ {
if i == 0 {
cellCols[j] = make([][]byte, len(cellsPerBlob))
proofCols[j] = make([][]byte, len(cellsPerBlob))
cellCols[j] = make([][]byte, len(rows))
proofCols[j] = make([][]byte, len(rows))
}
cellCols[j][i] = cells[j][:]
proofCols[j][i] = proofs[j][:]
cellCols[j][i] = cp.Cells[j][:]
proofCols[j][i] = cp.Proofs[j][:]
}
}
return cellCols, proofCols, nil
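For readability, a simplified, hypothetical version of the rotation the hunk above converges on: rows are blobs, the inner index is the column, and the output is column-major so each column can be packed into one sidecar (the length checks of the real helper are omitted):

```go
package example

import "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"

// rotate assumes every row carries at least numCols cells and proofs.
func rotate(rows []kzg.CellsAndProofs, numCols uint64) (cellCols, proofCols [][][]byte) {
	cellCols = make([][][]byte, numCols)
	proofCols = make([][][]byte, numCols)
	for j := uint64(0); j < numCols; j++ {
		cellCols[j] = make([][]byte, len(rows))
		proofCols[j] = make([][]byte, len(rows))
		for i := range rows {
			cellCols[j][i] = rows[i].Cells[j][:]
			proofCols[j][i] = rows[i].Proofs[j][:]
		}
	}
	return cellCols, proofCols
}
```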

View File

@@ -6,7 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -59,8 +59,6 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
}
func TestDataColumnSidecars(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
t.Run("sizes mismatch", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
@@ -70,16 +68,16 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)
// Create cells and proofs.
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, numberOfColumns),
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
},
}
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
})
@@ -94,18 +92,18 @@ func TestDataColumnSidecars(t *testing.T) {
// Create cells and proofs with insufficient cells for the number of columns.
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, 10), // Only 10 cells
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, 10), // Only 10 proofs
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, 10), // Only 10 cells
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
},
}
// This should fail because the function will try to access columns up to NumberOfColumns
// but we only have 10 cells/proofs.
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})
@@ -119,17 +117,18 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)
// Create cells and proofs with sufficient cells but insufficient proofs.
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, 5), // Only 5 proofs, less than columns
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
},
}
// This should fail when trying to access proof beyond index 4.
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
require.ErrorContains(t, "not enough proofs", err)
})
@@ -150,26 +149,29 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)
// Create cells and proofs with correct dimensions.
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, numberOfColumns),
make([]kzg.Proof, numberOfColumns),
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
}
// Set distinct values in cells and proofs for testing
for i := range numberOfColumns {
cellsPerBlob[0][i][0] = byte(i)
proofsPerBlob[0][i][0] = byte(i)
cellsPerBlob[1][i][0] = byte(i + 128)
proofsPerBlob[1][i][0] = byte(i + 128)
cellsAndProofs[0].Cells[i][0] = byte(i)
cellsAndProofs[0].Proofs[i][0] = byte(i)
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
}
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
require.NotNil(t, sidecars)
require.Equal(t, int(numberOfColumns), len(sidecars))
@@ -197,7 +199,6 @@ func TestDataColumnSidecars(t *testing.T) {
}
func TestReconstructionSource(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns
// Create a Fulu block with blob commitments.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
commitment1 := make([]byte, 48)
@@ -213,26 +214,29 @@ func TestReconstructionSource(t *testing.T) {
require.NoError(t, err)
// Create cells and proofs with correct dimensions.
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
make([]kzg.Proof, numberOfColumns),
make([]kzg.Proof, numberOfColumns),
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
}
// Set distinct values in cells and proofs for testing
for i := range numberOfColumns {
cellsPerBlob[0][i][0] = byte(i)
proofsPerBlob[0][i][0] = byte(i)
cellsPerBlob[1][i][0] = byte(i + 128)
proofsPerBlob[1][i][0] = byte(i + 128)
cellsAndProofs[0].Cells[i][0] = byte(i)
cellsAndProofs[0].Proofs[i][0] = byte(i)
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
}
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
require.NotNil(t, sidecars)
require.Equal(t, int(numberOfColumns), len(sidecars))
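The test edits above also swap the compile-time fieldparams.NumberOfColumns constant for the runtime params.BeaconConfig().NumberOfColumns value, so per-test config overrides take effect. A minimal sketch of that pattern using the config helpers already shown in this diff (the 64-column override is purely illustrative):

```go
package example_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/config/params"
)

func TestColumnsFollowConfig(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.NumberOfColumns = 64 // hypothetical override for illustration
	params.OverrideBeaconConfig(cfg)

	if got := params.BeaconConfig().NumberOfColumns; got != 64 {
		t.Fatalf("expected 64 columns, got %d", got)
	}
}
```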

View File

@@ -119,7 +119,7 @@ func TestFuzzverifySigningRoot_10000(_ *testing.T) {
var p []byte
var s []byte
var d []byte
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(st)
fuzzer.Fuzz(&pubkey)
fuzzer.Fuzz(&sig)

View File

@@ -28,7 +28,8 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
block, err := benchmark.PreGenFullBlock()
require.NoError(b, err)
for i := 0; b.Loop(); i++ {
b.ResetTimer()
for i := 0; i < b.N; i++ {
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
@@ -59,7 +60,8 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
_, err = coreState.ExecuteStateTransition(b.Context(), beaconState, wsb)
require.NoError(b, err, "Failed to process block, benchmarks will fail")
for i := 0; b.Loop(); i++ {
b.ResetTimer()
for i := 0; i < b.N; i++ {
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(b, err)
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
@@ -81,7 +83,8 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, beaconState.SetSlot(currentSlot))
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
// at run time.
_, err := coreState.ProcessEpochPrecompute(b.Context(), beaconState.Copy())
@@ -93,7 +96,8 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
beaconState, err := benchmark.PreGenstateFullEpochs()
require.NoError(b, err)
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := beaconState.HashTreeRoot(b.Context())
require.NoError(b, err)
}
@@ -109,7 +113,8 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
_, err = beaconState.HashTreeRoot(ctx)
require.NoError(b, err)
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := beaconState.HashTreeRoot(ctx)
require.NoError(b, err)
}
@@ -123,7 +128,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
b.Run("Proto_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for b.Loop() {
for i := 0; i < b.N; i++ {
_, err := proto.Marshal(natState)
require.NoError(b, err)
}
@@ -132,7 +137,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for b.Loop() {
for i := 0; i < b.N; i++ {
_, err := natState.MarshalSSZ()
require.NoError(b, err)
}
@@ -152,7 +157,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
b.Run("Proto_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for b.Loop() {
for i := 0; i < b.N; i++ {
require.NoError(b, proto.Unmarshal(protoObject, &ethpb.BeaconState{}))
}
})
@@ -160,7 +165,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for b.Loop() {
for i := 0; i < b.N; i++ {
sszState := &ethpb.BeaconState{}
require.NoError(b, sszState.UnmarshalSSZ(sszObject))
}
@@ -169,7 +174,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
func clonedStates(beaconState state.BeaconState) []state.BeaconState {
clonedStates := make([]state.BeaconState, runAmount)
for i := range runAmount {
for i := 0; i < runAmount; i++ {
clonedStates[i] = beaconState.Copy()
}
return clonedStates
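These benchmark and loop hunks step back from the Go 1.24 testing.B.Loop helper to the classic b.N loop, with b.ResetTimer called after setup so only the measured work is timed. A minimal, generic sketch of that pattern:

```go
package example_test

import "testing"

func BenchmarkCopy(b *testing.B) {
	data := make([]byte, 1<<20) // setup, excluded from the measurement

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf := make([]byte, len(data))
		copy(buf, data) // the operation being measured
	}
}
```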

View File

@@ -108,7 +108,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
// prepare copies for both states
var setups []state.BeaconState
for i := range uint64(300) {
for i := uint64(0); i < 300; i++ {
var st state.BeaconState
if i%2 == 0 {
st = s1

View File

@@ -95,7 +95,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
}
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range randaoMixes {
for i := 0; i < len(randaoMixes); i++ {
h := make([]byte, 32)
copy(h, eth1Data.BlockHash)
randaoMixes[i] = h
@@ -104,17 +104,17 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
zeroHash := params.BeaconConfig().ZeroHash[:]
activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range activeIndexRoots {
for i := 0; i < len(activeIndexRoots); i++ {
activeIndexRoots[i] = zeroHash
}
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
blockRoots[i] = zeroHash
}
stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range stateRoots {
for i := 0; i < len(stateRoots); i++ {
stateRoots[i] = zeroHash
}
@@ -131,7 +131,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
}
scoresMissing := len(preState.Validators()) - len(scores)
if scoresMissing > 0 {
for range scoresMissing {
for i := 0; i < scoresMissing; i++ {
scores = append(scores, 0)
}
}

View File

@@ -122,7 +122,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
}
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range randaoMixes {
for i := 0; i < len(randaoMixes); i++ {
h := make([]byte, 32)
copy(h, eth1Data.BlockHash)
randaoMixes[i] = h
@@ -131,17 +131,17 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
zeroHash := params.BeaconConfig().ZeroHash[:]
activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := range activeIndexRoots {
for i := 0; i < len(activeIndexRoots); i++ {
activeIndexRoots[i] = zeroHash
}
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
blockRoots[i] = zeroHash
}
stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range stateRoots {
for i := 0; i < len(stateRoots); i++ {
stateRoots[i] = zeroHash
}

View File

@@ -17,7 +17,7 @@ func TestGenesisBeaconState_1000(t *testing.T) {
deposits := make([]*ethpb.Deposit, 300000)
var genesisTime uint64
eth1Data := &ethpb.Eth1Data{}
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(&deposits)
fuzzer.Fuzz(&genesisTime)
fuzzer.Fuzz(eth1Data)
@@ -40,7 +40,7 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
preState, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{}
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(&genesisTime)
fuzzer.Fuzz(eth1Data)
fuzzer.Fuzz(preState)
@@ -60,7 +60,7 @@ func TestIsValidGenesisState_100000(_ *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
var chainStartDepositCount, currentTime uint64
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(&chainStartDepositCount)
fuzzer.Fuzz(&currentTime)
IsValidGenesisState(chainStartDepositCount, currentTime)

View File

@@ -21,7 +21,7 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
sb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
if sb.Block == nil || sb.Block.Body == nil {
@@ -45,7 +45,7 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
sb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
if sb.Block == nil || sb.Block.Body == nil {
@@ -68,7 +68,7 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
require.NoError(t, err)
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
s, err := ProcessSlot(ctx, state)
if err != nil && s != nil {
@@ -86,7 +86,7 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
slot := primitives.Slot(0)
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(&slot)
s, err := ProcessSlots(ctx, state, slot)
@@ -105,7 +105,7 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
bb := &ethpb.BeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(bb)
if bb.Body == nil {
@@ -128,7 +128,7 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
bb := &ethpb.BeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(bb)
if bb.Body == nil {
@@ -148,7 +148,7 @@ func TestFuzzCanProcessEpoch_10000(t *testing.T) {
require.NoError(t, err)
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
time.CanProcessEpoch(state)
}
@@ -162,7 +162,7 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
require.NoError(t, err)
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
s, err := ProcessEpochPrecompute(ctx, state)
if err != nil && s != nil {
@@ -180,7 +180,7 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
sb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for range 1000 {
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(sb)
if sb.Block == nil || sb.Block.Body == nil || sb.Block.Body.Eth1Data == nil {

View File

@@ -754,7 +754,8 @@ func BenchmarkProcessSlots_Capella(b *testing.B) {
var err error
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
@@ -767,7 +768,8 @@ func BenchmarkProcessSlots_Deneb(b *testing.B) {
var err error
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
@@ -780,7 +782,8 @@ func BenchmarkProcessSlots_Electra(b *testing.B) {
var err error
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)

View File

@@ -307,7 +307,7 @@ func SlashValidator(
// ActivatedValidatorIndices determines the indices activated during the given epoch.
func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
activations := make([]primitives.ValidatorIndex, 0)
for i := range validators {
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ActivationEpoch <= epoch && epoch < val.ExitEpoch {
activations = append(activations, primitives.ValidatorIndex(i))
@@ -319,7 +319,7 @@ func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Valid
// SlashedValidatorIndices determines the indices slashed during the given epoch.
func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
slashed := make([]primitives.ValidatorIndex, 0)
for i := range validators {
for i := 0; i < len(validators); i++ {
val := validators[i]
maxWithdrawableEpoch := primitives.MaxEpoch(val.WithdrawableEpoch, epoch+params.BeaconConfig().EpochsPerSlashingsVector)
if val.WithdrawableEpoch == maxWithdrawableEpoch && val.Slashed {

View File

@@ -172,7 +172,7 @@ func TestSlashValidator_OK(t *testing.T) {
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)
for range validatorCount {
for i := 0; i < validatorCount; i++ {
registry = append(registry, &ethpb.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
@@ -226,7 +226,7 @@ func TestSlashValidator_Electra(t *testing.T) {
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)
for range validatorCount {
for i := 0; i < validatorCount; i++ {
registry = append(registry, &ethpb.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,

Some files were not shown because too many files have changed in this diff.