Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 07:58:22 -05:00)

Merge branch 'develop' into backfill-data-columns

BUILD.bazel (19 changes)
@@ -197,6 +197,25 @@ nogo(
     "//tools/analyzers/logcapitalization:go_default_library",
     "//tools/analyzers/logruswitherror:go_default_library",
     "//tools/analyzers/maligned:go_default_library",
+    "//tools/analyzers/modernize/any:go_default_library",
+    "//tools/analyzers/modernize/appendclipped:go_default_library",
+    "//tools/analyzers/modernize/bloop:go_default_library",
+    "//tools/analyzers/modernize/fmtappendf:go_default_library",
+    "//tools/analyzers/modernize/forvar:go_default_library",
+    "//tools/analyzers/modernize/mapsloop:go_default_library",
+    "//tools/analyzers/modernize/minmax:go_default_library",
+    #"//tools/analyzers/modernize/newexpr:go_default_library",  # Disabled until go 1.26.
+    "//tools/analyzers/modernize/omitzero:go_default_library",
+    "//tools/analyzers/modernize/rangeint:go_default_library",
+    "//tools/analyzers/modernize/reflecttypefor:go_default_library",
+    "//tools/analyzers/modernize/slicescontains:go_default_library",
+    #"//tools/analyzers/modernize/slicesdelete:go_default_library",  # Disabled, see https://go.dev/issue/73686
+    "//tools/analyzers/modernize/slicessort:go_default_library",
+    "//tools/analyzers/modernize/stringsbuilder:go_default_library",
+    "//tools/analyzers/modernize/stringscutprefix:go_default_library",
+    "//tools/analyzers/modernize/stringsseq:go_default_library",
+    "//tools/analyzers/modernize/testingcontext:go_default_library",
+    "//tools/analyzers/modernize/waitgroup:go_default_library",
     "//tools/analyzers/nop:go_default_library",
     "//tools/analyzers/nopanic:go_default_library",
     "//tools/analyzers/properpermissions:go_default_library",
CHANGELOG.md (83 changes)
@@ -4,6 +4,87 @@ All notable changes to this project will be documented in this file.
 
 The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
 
+## [v7.0.0](https://github.com/prysmaticlabs/prysm/compare/v6.1.4...v7.0.0) - 2025-11-10
+
+This is our initial mainnet release for the Ethereum Fulu fork on December 3rd, 2025. All operators MUST update to v7.0.0 or a later release prior to the Fulu fork epoch `411392`. See the [Ethereum Foundation blog post](https://blog.ethereum.org/2025/11/06/fusaka-mainnet-announcement) for more information on Fulu.
+
+Other than the mainnet Fulu fork schedule, there are a few callouts in this release:
+- The `by-epoch` blob storage format is the default for new installations. Users that haven't migrated will see a warning to migrate to the new format. Existing deployments may set `--blob-storage-layout=by-epoch` to perform the migration.
+- Several deprecated flags have been deleted! Please review the "Removed" section of this changelog carefully. If you are referencing a removed flag, Prysm will not start! All of these flags had no effect for at least one release.
+- Several deprecated API endpoints have been deleted. Please review the "Removed" section of this changelog carefully.
+- Backfill is not supported in Fulu. This is expected to be fixed in the next release and should be delivered prior to the mainnet fork activation.
+- The builder default gas limit is raised from `45000000` (45 MGas) to `60000000` (60 MGas).
+- Several bug fixes and improvements.
+
+### Added
+
+- Allow custom headers in validator client HTTP requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15884)
+- Metric to track data columns recovered from the execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15924)
+- Metrics: Add a count of peers per direction (inbound/outbound) and transport type (TCP/QUIC). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
+- `p2p_subscribed_topic_peer_total`: Reset to avoid dangling values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
+- Add `p2p_minimum_peers_per_subnet` metric. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
+- Added a `GeneralizedIndicesFromPath` function to calculate the generalized indices for a given `sszInfo` object and a `PathElement`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15873)
+- Add Gloas protobuf definitions with spec tests and SSZ serialization support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15601)
+- Fulu fork epoch for mainnet configurations set for December 3, 2025, 09:49:11pm UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
+- Added BPO schedules for December 9, 2025, 02:21:11pm UTC and January 7, 2026, 01:01:11am UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
+
+### Changed
+
+- Updated consensus spec tests to v1.6.0-beta.1 with new hashes and URL template. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15918)
+- Use the `by-epoch` blob storage layout by default and log a warning to users who continue to use the flat layout, encouraging them to switch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15904)
+- Update go-netroute to `v0.3.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15934)
+- Introduced a `Path` type for SSZ-QL queries and updated `PathElement` (removed the `Length` field, kept `Index`), enforcing that `len` queries are terminal (at most one per path). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
+- Changed length query syntax from `block.payload.len(transactions)` to `len(block.payload.transactions)`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
+- Update `go-netroute` to `v0.4.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15949)
+- Updated consensus spec tests to v1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15960)
+- Updated go-bitfield from prysmaticlabs to offchainlabs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15968)
+- Bump builder default gas limit from `45000000` (45 MGas) to `60000000` (60 MGas). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15979)
+- Use head state for block pubsub validation when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15972)
+- Updated consensus spec to v1.6.0 from v1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
+- Upgrade Prysm v6 to v7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15989)
+- Use a read-only head state when possible to validate data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15977)
+
+### Removed
+
+- Log mentioning removed flag `--show-deposit-data`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15926)
+- Remove Beacon API endpoints that were deprecated in Electra: `GET /eth/v1/beacon/deposit_snapshot`, `GET /eth/v1/beacon/blocks/{block_id}/attestations`, `GET /eth/v1/beacon/pool/attestations`, `POST /eth/v1/beacon/pool/attestations`, `GET /eth/v1/beacon/pool/attester_slashings`, `POST /eth/v1/beacon/pool/attester_slashings`, `GET /eth/v1/validator/aggregate_attestation`, `POST /eth/v1/validator/aggregate_and_proofs`, `POST /eth/v1/beacon/blocks`, `POST /eth/v1/beacon/blinded_blocks`, `GET /eth/v1/builder/states/{state_id}/expected_withdrawals`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15962)
+- Deprecated flag `--enable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-build-block-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-reorg-late-blocks` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-aggregate-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--enable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--enable-verbose-sig-verification` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--enable-debug-rpc-endpoints` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--beacon-rpc-gateway-provider` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-grpc-gateway` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--enable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--enable-committee-aware-packing` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--interop-genesis-time` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--interop-num-validators` has been removed (from beacon-chain only; still available in validator client). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--enable-quic` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--attest-timely` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--disable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+- Deprecated flag `--p2p-metadata` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
+
+### Fixed
+
+- Remove `Reading static P2P private key from a file.` log if Fulu is enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15913)
+- `blobSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
+- `dataColumnSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
+- Fix incorrect version used when sending attestation version in Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15950)
+- Changed the behavior of topic subscriptions such that only topics that require the active validator count will compute that value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
+- Added a mutex to the computation of the active validator count during topic subscription, to avoid a race condition where multiple goroutines compute the same work. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
+- `RODataColumnsVerifier.ValidProposerSignature`: Ensure the expensive signature verification is only performed once for concurrent requests for the same signature data. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15954)
+- Use `filepath` for path operations (clean, join, etc.) to ensure correct behavior on Windows. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15953)
+- Fix #15969: Handle addition overflow in `/eth/v1/beacon/rewards/attestations/{epoch}`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15970)
+- `SidecarProposerExpected`: Add the slot to the singleflight key. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15976)
+- Ensure the rate limit is respected for by-root blob and data column sidecar requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15981)
+- Use head only if it's compatible with the target for attestation validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15965)
+- Backfill is disabled if the checkpoint sync origin is after the Fulu fork, due to lack of `DataColumnSidecar` support in backfill. To track the availability of Fulu-compatible backfill, please watch https://github.com/OffchainLabs/prysm/issues/15982. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15987)
+- `SidecarProposerExpected`: Use the correct value of the proposer index in the singleflight group. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15993)
 
 ## [v6.1.4](https://github.com/prysmaticlabs/prysm/compare/v6.1.3...v6.1.4) - 2025-10-24
 
 This release includes a bug fix affecting block proposals in rare cases, along with an important update for Windows users running post-Fusaka fork.

@@ -3820,4 +3901,4 @@ There are no security updates in this release.
 
 # Older than v2.0.0
 
-For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
+For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
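The two `SidecarProposerExpected` fixes above both tighten the key used with a singleflight group. A minimal sketch of why the deduplication key must include every input that affects the result (this assumes `golang.org/x/sync/singleflight`; the key shape and function are illustrative, not Prysm's actual code):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

// expectedProposer is a stand-in for an expensive lookup. If two goroutines
// ask about different slots but share one singleflight key, the second caller
// silently receives the first caller's result. Folding the slot (and any
// other distinguishing input) into the key prevents that.
func expectedProposer(slot, proposerIndex uint64) (any, error) {
	key := fmt.Sprintf("proposer-%d-%d", slot, proposerIndex) // key includes the slot
	v, err, _ := group.Do(key, func() (any, error) {
		// ... expensive state read / signature verification runs once per key ...
		return proposerIndex, nil
	})
	return v, err
}

func main() {
	v, err := expectedProposer(411392, 7)
	fmt.Println(v, err)
}
```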
WORKSPACE (20 changes)
@@ -205,6 +205,26 @@ prysm_image_deps()
 
 load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
 
+# Override golang.org/x/tools to use v0.38.0 instead of v0.30.0.
+# This is necessary as this dependency is required by rules_go and they do not accept dependency
+# update PRs. Instead, they ask downstream projects to override the dependency. To generate the
+# patches or update this dependency again, check out the rules_go repo then run the releaser tool:
+#   bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools
+# Copy the patches and http_archive updates from rules_go here.
+http_archive(
+    name = "org_golang_x_tools",
+    patch_args = ["-p1"],
+    patches = [
+        "//third_party:org_golang_x_tools-deletegopls.patch",
+        "//third_party:org_golang_x_tools-gazelle.patch",
+    ],
+    sha256 = "8509908cd7fc35aa09ff49d8494e4fd25bab9e6239fbf57e0d8344f6bec5802b",
+    strip_prefix = "tools-0.38.0",
+    urls = [
+        "https://github.com/golang/tools/archive/refs/tags/v0.38.0.zip",
+    ],
+)
+
 go_rules_dependencies()
 
 go_register_toolchains(
@@ -56,7 +56,7 @@ func ParseAccept(header string) []mediaRange {
 	}
 
 	var out []mediaRange
-	for _, field := range strings.Split(header, ",") {
+	for field := range strings.SplitSeq(header, ",") {
 		if r, ok := parseMediaRange(field); ok {
 			out = append(out, r)
 		}
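The `stringsseq` analyzer rewrites split-then-range loops like the one above. A minimal sketch of the difference (Go 1.24+; the example strings are mine):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	header := "text/html, application/json"

	// Old style: strings.Split allocates an intermediate []string.
	for _, field := range strings.Split(header, ",") {
		fmt.Println(strings.TrimSpace(field))
	}

	// New style: strings.SplitSeq returns an iterator (iter.Seq[string]),
	// yielding each field without building the slice first.
	for field := range strings.SplitSeq(header, ",") {
		fmt.Println(strings.TrimSpace(field))
	}
}
```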
@@ -421,7 +421,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
 
 func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
 	vs := make([]*structs.SignedValidatorRegistration, len(svr))
-	for i := 0; i < len(svr); i++ {
+	for i := range svr {
 		vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
 	}
 	body, err := json.Marshal(vs)
@@ -121,7 +121,7 @@ func (s *Uint64String) UnmarshalText(t []byte) error {
 
 // MarshalText returns a byte representation of the text from Uint64String.
 func (s Uint64String) MarshalText() ([]byte, error) {
-	return []byte(fmt.Sprintf("%d", s)), nil
+	return fmt.Appendf(nil, "%d", s), nil
 }
 
 // VersionResponse is a JSON representation of a field in the builder API header response.
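The `fmtappendf` analyzer replaces `[]byte(fmt.Sprintf(...))` with `fmt.Appendf`, which formats directly into a byte slice and skips the string-to-bytes copy. A small sketch (Go 1.19+; the values are illustrative):

```go
package main

import "fmt"

func main() {
	var n uint64 = 42

	// Old: format to a string, then copy it into a fresh []byte.
	old := []byte(fmt.Sprintf("%d", n))

	// New: append the formatted output straight onto a byte slice
	// (nil here, so exactly one slice is allocated).
	buf := fmt.Appendf(nil, "%d", n)

	fmt.Println(string(old), string(buf)) // 42 42
}
```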
@@ -15,7 +15,7 @@ import (
 func LogRequests(
 	ctx context.Context,
 	method string, req,
-	reply interface{},
+	reply any,
 	cc *grpc.ClientConn,
 	invoker grpc.UnaryInvoker,
 	opts ...grpc.CallOption,
@@ -14,5 +14,5 @@ type GetForkScheduleResponse struct {
 }
 
 type GetSpecResponse struct {
-	Data interface{} `json:"data"`
+	Data any `json:"data"`
 }
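These `interface{}` to `any` rewrites (the `any` analyzer) are purely cosmetic: `any` has been a predeclared alias for `interface{}` since Go 1.18, so the two spellings denote the identical type. A tiny sketch:

```go
package main

import "fmt"

func main() {
	// any is an alias, not a new type: both declarations are identical.
	var a interface{} = "hello"
	var b any = "hello" // preferred spelling since Go 1.18
	fmt.Println(a, b)
}
```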
@@ -93,9 +93,9 @@ func TestToggleMultipleTimes(t *testing.T) {
 
 	v := New()
 	pre := !v.IsSet()
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		v.SetTo(false)
-		for j := 0; j < i; j++ {
+		for range i {
 			pre = v.Toggle()
 		}
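The `rangeint` analyzer converts classic counted loops to Go 1.22's range-over-int form, which behaves identically as long as the loop variable is not mutated inside the body. A minimal sketch:

```go
package main

import "fmt"

func main() {
	// Old counted loop.
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}

	// Equivalent Go 1.22+ form; i ranges over 0, 1, 2.
	for i := range 3 {
		fmt.Println(i)
	}

	// When the index is unused, the variable is dropped entirely.
	for range 3 {
		fmt.Println("tick")
	}
}
```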
@@ -149,7 +149,7 @@ func TestRace(t *testing.T) {
 
 	// Writer
 	go func() {
-		for i := 0; i < repeat; i++ {
+		for range repeat {
 			v.Set()
 			wg.Done()
 		}
@@ -157,7 +157,7 @@ func TestRace(t *testing.T) {
 
 	// Reader
 	go func() {
-		for i := 0; i < repeat; i++ {
+		for range repeat {
 			v.IsSet()
 			wg.Done()
 		}
@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {
 
 	// Writer
 	go func() {
-		for i := 0; i < repeat; i++ {
+		for range repeat {
 			v.UnSet()
 			wg.Done()
 		}
@@ -173,7 +173,7 @@ func TestRace(t *testing.T) {
 
 	// Reader And Writer
 	go func() {
-		for i := 0; i < repeat; i++ {
+		for range repeat {
 			v.Toggle()
 			wg.Done()
 		}
@@ -198,8 +198,8 @@ func ExampleAtomicBool() {
 func BenchmarkMutexRead(b *testing.B) {
 	var m sync.RWMutex
 	var v bool
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		m.RLock()
 		_ = v
 		m.RUnlock()
@@ -208,16 +208,16 @@ func BenchmarkMutexRead(b *testing.B) {
 
 func BenchmarkAtomicValueRead(b *testing.B) {
 	var v atomic.Value
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		_ = v.Load() != nil
 	}
 }
 
 func BenchmarkAtomicBoolRead(b *testing.B) {
 	v := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		_ = v.IsSet()
 	}
 }
@@ -227,8 +227,8 @@ func BenchmarkAtomicBoolRead(b *testing.B) {
 func BenchmarkMutexWrite(b *testing.B) {
 	var m sync.RWMutex
 	var v bool
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		m.RLock()
 		v = true
 		m.RUnlock()
@@ -239,16 +239,16 @@ func BenchmarkMutexWrite(b *testing.B) {
 
 func BenchmarkAtomicValueWrite(b *testing.B) {
 	var v atomic.Value
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		v.Store(true)
 	}
 }
 
 func BenchmarkAtomicBoolWrite(b *testing.B) {
 	v := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		v.Set()
 	}
 }
@@ -258,8 +258,8 @@ func BenchmarkAtomicBoolWrite(b *testing.B) {
 func BenchmarkMutexCAS(b *testing.B) {
 	var m sync.RWMutex
 	var v bool
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		m.Lock()
 		if !v {
 			v = true
@@ -270,8 +270,8 @@ func BenchmarkMutexCAS(b *testing.B) {
 
 func BenchmarkAtomicBoolCAS(b *testing.B) {
 	v := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		v.SetToIf(false, true)
 	}
 }
@@ -281,8 +281,8 @@ func BenchmarkAtomicBoolCAS(b *testing.B) {
 func BenchmarkMutexToggle(b *testing.B) {
 	var m sync.RWMutex
 	var v bool
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		m.Lock()
 		v = !v
 		m.Unlock()
@@ -291,8 +291,8 @@ func BenchmarkMutexToggle(b *testing.B) {
 
 func BenchmarkAtomicBoolToggle(b *testing.B) {
 	v := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		v.Toggle()
 	}
 }
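The `bloop` analyzer converts `for i := 0; i < b.N; i++` benchmark loops to Go 1.24's `testing.B.Loop`, which also makes the explicit `b.ResetTimer()` calls above unnecessary, since `Loop` starts timing at its first call. A minimal sketch:

```go
package bench

import "testing"

func BenchmarkOld(b *testing.B) {
	data := make([]int, 1024) // setup happens before the loop
	b.ResetTimer()            // old style: exclude setup from timing
	for i := 0; i < b.N; i++ {
		_ = len(data)
	}
}

func BenchmarkNew(b *testing.B) {
	data := make([]int, 1024)
	// b.Loop() times only the loop body and keeps it from being
	// optimized away, so no ResetTimer call is needed.
	for b.Loop() {
		_ = len(data)
	}
}
```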
@@ -21,7 +21,7 @@ const (
 
 func init() {
 	input = make([][]byte, benchmarkElements)
-	for i := 0; i < benchmarkElements; i++ {
+	for i := range benchmarkElements {
 		input[i] = make([]byte, benchmarkElementSize)
 		_, err := rand.Read(input[i])
 		if err != nil {
@@ -35,7 +35,7 @@ func hash(input [][]byte) [][]byte {
 	output := make([][]byte, len(input))
 	for i := range input {
 		copy(output, input)
-		for j := 0; j < benchmarkHashRuns; j++ {
+		for range benchmarkHashRuns {
 			hash := sha256.Sum256(output[i])
 			output[i] = hash[:]
 		}
@@ -44,15 +44,15 @@ func hash(input [][]byte) [][]byte {
 }
 
 func BenchmarkHash(b *testing.B) {
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		hash(input)
 	}
 }
 
 func BenchmarkHashMP(b *testing.B) {
 	output := make([][]byte, len(input))
-	for i := 0; i < b.N; i++ {
-		workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
+	for b.Loop() {
+		workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
 			return hash(input[offset : offset+entries]), nil
 		})
 		require.NoError(b, err)
@@ -7,7 +7,7 @@ import (
 
 // Debounce events fired over a channel by a specified duration, ensuring no events
 // are handled until a certain interval of time has passed.
-func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan interface{}, handler func(interface{})) {
+func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan any, handler func(any)) {
	var timer *time.Timer
 	defer func() {
 		if timer != nil {
@@ -14,7 +14,7 @@ import (
 )
 
 func TestDebounce_NoEvents(t *testing.T) {
-	eventsChan := make(chan interface{}, 100)
+	eventsChan := make(chan any, 100)
 	ctx, cancel := context.WithCancel(t.Context())
 	interval := time.Second
 	timesHandled := int32(0)
@@ -26,7 +26,7 @@ func TestDebounce_NoEvents(t *testing.T) {
 		})
 	}()
 	go func() {
-		async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
+		async.Debounce(ctx, interval, eventsChan, func(event any) {
 			atomic.AddInt32(&timesHandled, 1)
 		})
 		wg.Done()
@@ -38,7 +38,7 @@ func TestDebounce_NoEvents(t *testing.T) {
 }
 
 func TestDebounce_CtxClosing(t *testing.T) {
-	eventsChan := make(chan interface{}, 100)
+	eventsChan := make(chan any, 100)
 	ctx, cancel := context.WithCancel(t.Context())
 	interval := time.Second
 	timesHandled := int32(0)
@@ -62,7 +62,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
 		})
 	}()
 	go func() {
-		async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
+		async.Debounce(ctx, interval, eventsChan, func(event any) {
 			atomic.AddInt32(&timesHandled, 1)
 		})
 		wg.Done()
@@ -74,14 +74,14 @@ func TestDebounce_CtxClosing(t *testing.T) {
 }
 
 func TestDebounce_SingleHandlerInvocation(t *testing.T) {
-	eventsChan := make(chan interface{}, 100)
+	eventsChan := make(chan any, 100)
 	ctx, cancel := context.WithCancel(t.Context())
 	interval := time.Second
 	timesHandled := int32(0)
-	go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
+	go async.Debounce(ctx, interval, eventsChan, func(event any) {
 		atomic.AddInt32(&timesHandled, 1)
 	})
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		eventsChan <- struct{}{}
 	}
 	// We should expect 100 rapid fire changes to only have caused
@@ -92,14 +92,14 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
 }
 
 func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
-	eventsChan := make(chan interface{}, 100)
+	eventsChan := make(chan any, 100)
 	ctx, cancel := context.WithCancel(t.Context())
 	interval := time.Second
 	timesHandled := int32(0)
-	go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
+	go async.Debounce(ctx, interval, eventsChan, func(event any) {
 		atomic.AddInt32(&timesHandled, 1)
 	})
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		eventsChan <- struct{}{}
 	}
 	require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")
@@ -93,9 +93,7 @@ func ExampleSubscriptionScope() {
 	// Run a subscriber in the background.
 	divsub := app.SubscribeResults('/', divs)
 	mulsub := app.SubscribeResults('*', muls)
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		defer fmt.Println("subscriber exited")
 		defer divsub.Unsubscribe()
 		defer mulsub.Unsubscribe()
@@ -111,7 +109,7 @@ func ExampleSubscriptionScope() {
 				return
 			}
 		}
-	}()
+	})
 
 	// Interact with the app.
 	app.Calc('/', 22, 11)
@@ -26,7 +26,7 @@ func ExampleNewSubscription() {
 	// Create a subscription that sends 10 integers on ch.
 	ch := make(chan int)
 	sub := event.NewSubscription(func(quit <-chan struct{}) error {
-		for i := 0; i < 10; i++ {
+		for i := range 10 {
 			select {
 			case ch <- i:
 			case <-quit:
@@ -3,6 +3,6 @@ package event
 // SubscriberSender is an abstract representation of an *event.Feed
 // to use in describing types that accept or return an *event.Feed.
 type SubscriberSender interface {
-	Subscribe(channel interface{}) Subscription
-	Send(value interface{}) (nsent int)
+	Subscribe(channel any) Subscription
+	Send(value any) (nsent int)
 }
@@ -30,7 +30,7 @@ var errInts = errors.New("error in subscribeInts")
 
 func subscribeInts(max, fail int, c chan<- int) Subscription {
 	return NewSubscription(func(quit <-chan struct{}) error {
-		for i := 0; i < max; i++ {
+		for i := range max {
 			if i >= fail {
 				return errInts
 			}
@@ -50,7 +50,7 @@ func TestNewSubscriptionError(t *testing.T) {
 	channel := make(chan int)
 	sub := subscribeInts(10, 2, channel)
 loop:
-	for want := 0; want < 10; want++ {
+	for want := range 10 {
 		select {
 		case got := <-channel:
 			require.Equal(t, want, got)
@@ -107,15 +107,13 @@ func TestLockUnlock(_ *testing.T) {
 
 func TestLockUnlock_CleansUnused(t *testing.T) {
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		lock := NewMultilock("dog", "cat", "owl")
 		lock.Lock()
 		assert.Equal(t, 3, len(locks.list))
 		lock.Unlock()
-
-		wg.Done()
-	}()
+	})
 	wg.Wait()
 	// We expect that unlocking completely cleared the locks list
 	// given all 3 lock keys were unused at time of unlock.
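The `waitgroup` analyzer collapses the `wg.Add(1)` / `go func()` / `wg.Done()` pattern into `sync.WaitGroup.Go`, added in Go 1.25, which does the counter bookkeeping itself. A minimal sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Old pattern: three pieces of bookkeeping that must stay in sync.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("worker (old style)")
	}()

	// Go 1.25+: Add and Done are handled by the WaitGroup itself.
	wg.Go(func() {
		fmt.Println("worker (new style)")
	})

	wg.Wait()
}
```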
@@ -9,14 +9,14 @@ import (
 // WorkerResults are the results of a scatter worker.
 type WorkerResults struct {
 	Offset int
-	Extent interface{}
+	Extent any
 }
 
 // Scatter scatters a computation across multiple goroutines.
 // This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
 // Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
 // Any error that occurs in the workers will be passed back to the calling function.
-func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
+func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (any, error)) ([]*WorkerResults, error) {
 	if inputLen <= 0 {
 		return nil, errors.New("input length must be greater than 0")
 	}
@@ -46,9 +46,9 @@ func TestDouble(t *testing.T) {
 		inValues[i] = i
 	}
 	outValues := make([]int, test.inValues)
-	workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
+	workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
 		extent := make([]int, entries)
-		for i := 0; i < entries; i++ {
+		for i := range entries {
 			extent[i] = inValues[offset+i] * 2
 		}
 		return extent, nil
@@ -72,8 +72,8 @@ func TestDouble(t *testing.T) {
 func TestMutex(t *testing.T) {
 	totalRuns := 1048576
 	val := 0
-	_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
-		for i := 0; i < entries; i++ {
+	_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
+		for range entries {
 			mu.Lock()
 			val++
 			mu.Unlock()
@@ -90,8 +90,8 @@ func TestMutex(t *testing.T) {
 func TestError(t *testing.T) {
 	totalRuns := 1024
 	val := 0
-	_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
-		for i := 0; i < entries; i++ {
+	_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
+		for range entries {
 			mu.Lock()
 			val++
 			if val == 1011 {
@@ -70,7 +70,7 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) {
 	commitments := make([][]byte, blobCount)
 	proofs := make([][]byte, blobCount)
 
-	for i := 0; i < blobCount; i++ {
+	for i := range blobCount {
 		blob := random.GetRandBlob(int64(i))
 		commitment, proof, err := GenerateCommitmentAndProof(blob)
 		require.NoError(t, err)
@@ -432,8 +432,8 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
 	commitments[1] = make([]byte, 32) // Wrong size
 
 	// Add cell proofs for both blobs
-	for i := 0; i < blobCount; i++ {
-		for j := uint64(0); j < numberOfColumns; j++ {
+	for range blobCount {
+		for range numberOfColumns {
 			allCellProofs = append(allCellProofs, make([]byte, 48))
 		}
 	}
@@ -450,7 +450,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
 	commitments := make([][]byte, blobCount)
 	var allCellProofs [][]byte
 
-	for i := 0; i < blobCount; i++ {
+	for i := range blobCount {
 		randBlob := random.GetRandBlob(int64(i))
 		var blob Blob
 		copy(blob[:], randBlob[:])
@@ -461,7 +461,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
 		commitments[i] = commitment[:]
 
 		// Add cell proofs - make some invalid in the second blob
-		for j := uint64(0); j < numberOfColumns; j++ {
+		for j := range numberOfColumns {
 			if i == 1 && j == 64 {
 				// Invalid proof size in middle of second blob's proofs
 				allCellProofs = append(allCellProofs, make([]byte, 20))
@@ -209,16 +209,14 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
 	var wg sync.WaitGroup
 	errChan := make(chan error, 1000)
 
-	for i := 0; i < 1000; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+	for range 1000 {
+		wg.Go(func() {
 			cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
 			_, err := service.getAttPreState(ctx, cp1)
 			if err != nil {
 				errChan <- err
 			}
-		}()
+		})
 	}
 
 	go func() {
@@ -817,7 +817,7 @@ func (s *Service) areDataColumnsAvailable(
 	}
 
 	case <-ctx.Done():
-		var missingIndices interface{} = "all"
+		var missingIndices any = "all"
 		numberOfColumns := params.BeaconConfig().NumberOfColumns
 		missingIndicesCount := uint64(len(missing))
@@ -948,13 +948,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
 	// return early if we are not proposing next slot
 	if attribute.IsEmpty() {
-		headBlock, err := s.headBlock()
-		if err != nil {
-			log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
-		}
-		// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
-		// call notifyForkchoiceUpdate, so the event is fired here.
-		go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
 		return
 	}
@@ -147,7 +147,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
 	bState := st.Copy()
 
 	var blks []consensusblocks.ROBlock
-	for i := 0; i < 97; i++ {
+	for i := range 97 {
 		b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
 		require.NoError(t, err)
 		wsb, err := consensusblocks.NewSignedBeaconBlock(b)
@@ -1323,7 +1323,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 	require.NoError(t, err)
 
 	logHook := logTest.NewGlobal()
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		fc := &ethpb.Checkpoint{}
 		st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
 		require.NoError(t, err)
@@ -1949,7 +1949,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	require.Equal(t, true, optimistic)
 
 	// Check that the invalid blocks are not in database
-	for i := 0; i < 19-13; i++ {
+	for i := range 19 - 13 {
 		require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, invalidRoots[i]))
 	}
 
@@ -2805,6 +2805,10 @@ func TestProcessLightClientUpdate(t *testing.T) {
 	require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
 
 	for _, testVersion := range version.All()[1:] {
+		if testVersion == version.Gloas {
+			// TODO(16027): Unskip light client tests for Gloas
+			continue
+		}
 		t.Run(version.String(testVersion), func(t *testing.T) {
 			l := util.NewTestLightClient(t, testVersion)
 
@@ -2879,7 +2883,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
 
 	// set a better sync aggregate
 	scb := make([]byte, 64)
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		scb[i] = 0x01
 	}
 	oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
@@ -216,13 +216,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
 	root, err := b.Block.HashTreeRoot()
 	require.NoError(t, err)
 	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		wsb, err := blocks.NewSignedBeaconBlock(b)
 		require.NoError(t, err)
 		require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
-		wg.Done()
-	}()
+	})
 	wg.Wait()
 	time.Sleep(100 * time.Millisecond)
 	if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
@@ -14,6 +14,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
 	statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
@@ -470,30 +471,35 @@ func (s *Service) removeStartupState() {
 // UpdateCustodyInfoInDB updates the custody information in the database.
 // It returns the (potentially updated) custody group count and the earliest available slot.
 func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
-	isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
+	isSupernode := flags.Get().Supernode
+	isSemiSupernode := flags.Get().SemiSupernode
 
 	cfg := params.BeaconConfig()
 	custodyRequirement := cfg.CustodyRequirement
 
 	// Check if the node was previously subscribed to all data subnets, and if so,
 	// store the new status accordingly.
-	wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
+	wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
 	if err != nil {
 		log.WithError(err).Error("Could not update subscription status to all data subnets")
 		return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
 	}
 
-	// Warn the user if the node was previously subscribed to all data subnets and is not any more.
-	if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
-		log.Warnf(
-			"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
-			flags.SubscribeAllDataSubnets.Name,
-		)
+	// Compute the target custody group count based on current flag configuration.
+	targetCustodyGroupCount := custodyRequirement
+
+	// Supernode: custody all groups (either currently set or previously enabled)
+	if isSupernode {
+		targetCustodyGroupCount = cfg.NumberOfCustodyGroups
 	}
 
-	// Compute the custody group count.
-	custodyGroupCount := custodyRequirement
-	if isSubscribedToAllDataSubnets {
-		custodyGroupCount = cfg.NumberOfCustodyGroups
+	// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
+	if isSemiSupernode {
+		semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
+		if err != nil {
+			return 0, 0, errors.Wrap(err, "minimum custody group count")
+		}
+
+		targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
 	}
 
 	// Safely compute the fulu fork slot.
@@ -510,12 +516,23 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
 		}
 	}
 
-	earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
+	earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
 	if err != nil {
 		return 0, 0, errors.Wrap(err, "update custody info")
 	}
 
-	return earliestAvailableSlot, custodyGroupCount, nil
+	if isSupernode {
+		log.WithFields(logrus.Fields{
+			"current": actualCustodyGroupCount,
+			"target":  cfg.NumberOfCustodyGroups,
+		}).Info("Supernode mode enabled. Will custody all data columns going forward.")
+	}
+
+	if wasSupernode && !isSupernode {
+		log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
+	}
+
+	return earliestAvailableSlot, actualCustodyGroupCount, nil
 }
 
 func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
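A worked sketch of the custody ladder the new code implements. The constants here are illustrative (mainnet Fulu uses 128 custody groups, and the tests below assume reconstruction needs half of them); the real values come from `params.BeaconConfig()` and `peerdas.MinimumCustodyGroupCountToReconstruct`:

```go
package main

import "fmt"

const (
	custodyRequirement    = 4   // default per-node requirement (illustrative)
	numberOfCustodyGroups = 128 // mainnet NUMBER_OF_CUSTODY_GROUPS
)

// targetCustody mirrors the decision ladder in updateCustodyInfoInDB:
// default requirement < semi-supernode (enough to reconstruct) < supernode (everything).
func targetCustody(isSupernode, isSemiSupernode bool) uint64 {
	if isSupernode {
		return numberOfCustodyGroups // custody all groups
	}
	target := uint64(custodyRequirement)
	if isSemiSupernode {
		// Minimum needed to reconstruct all columns (half the groups),
		// unless validator duties already require more.
		target = max(uint64(custodyRequirement), numberOfCustodyGroups/2)
	}
	return target
}

func main() {
	fmt.Println(targetCustody(false, false)) // 4
	fmt.Println(targetCustody(false, true))  // 64
	fmt.Println(targetCustody(true, false))  // 128
}
```

Note that `UpdateCustodyInfo` in the database layer, not this ladder, is what prevents downgrades once a higher custody count has been recorded, which is exactly what the new tests below exercise.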
@@ -412,8 +412,7 @@ func BenchmarkHasBlockDB(b *testing.B) {
 	r, err := blk.Block.HashTreeRoot()
 	require.NoError(b, err)
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
 	}
 }
@@ -432,8 +431,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
 	require.NoError(b, err)
 	require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
 	}
 }
@@ -644,7 +642,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
 
 	resetFlags := flags.Get()
 	gFlags := new(flags.GlobalFlags)
-	gFlags.SubscribeAllDataSubnets = true
+	gFlags.Supernode = true
 	flags.Init(gFlags)
 	defer flags.Init(resetFlags)
 
@@ -682,7 +680,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
 	// ----------
 	resetFlags := flags.Get()
 	gFlags := new(flags.GlobalFlags)
-	gFlags.SubscribeAllDataSubnets = true
+	gFlags.Supernode = true
 	flags.Init(gFlags)
 	defer flags.Init(resetFlags)
 
@@ -697,4 +695,121 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
 		require.Equal(t, slot, actualEas)
 		require.Equal(t, numberOfCustodyGroups, actualCgc)
 	})
+
+	t.Run("Supernode downgrade prevented", func(t *testing.T) {
+		service, requirements := minimalTestService(t)
+		err = requirements.db.SaveBlock(ctx, roBlock)
+		require.NoError(t, err)
+
+		// Enable supernode
+		resetFlags := flags.Get()
+		gFlags := new(flags.GlobalFlags)
+		gFlags.Supernode = true
+		flags.Init(gFlags)
+
+		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
+		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
+		require.NoError(t, err)
+		require.Equal(t, slot, actualEas)
+		require.Equal(t, numberOfCustodyGroups, actualCgc)
+
+		// Try to downgrade by removing flag
+		gFlags.Supernode = false
+		flags.Init(gFlags)
+		defer flags.Init(resetFlags)
+
+		// Should still be supernode
+		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
+		require.NoError(t, err)
+		require.Equal(t, slot, actualEas)
+		require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
+	})
+
+	t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
+		service, requirements := minimalTestService(t)
+		err = requirements.db.SaveBlock(ctx, roBlock)
+		require.NoError(t, err)
+
+		// Enable semi-supernode
+		resetFlags := flags.Get()
+		gFlags := new(flags.GlobalFlags)
+		gFlags.SemiSupernode = true
+		flags.Init(gFlags)
+
+		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
+		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
+		require.NoError(t, err)
+		require.Equal(t, slot, actualEas)
+		semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
+		require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
+
+		// Try to downgrade by removing flag
+		gFlags.SemiSupernode = false
+		flags.Init(gFlags)
+		defer flags.Init(resetFlags)
+
+		// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
+		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
+		require.NoError(t, err)
+		require.Equal(t, slot, actualEas)
+		require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
+	})
+
+	t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
+		service, requirements := minimalTestService(t)
+		err = requirements.db.SaveBlock(ctx, roBlock)
+		require.NoError(t, err)
+
+		// Start with semi-supernode
+		resetFlags := flags.Get()
+		gFlags := new(flags.GlobalFlags)
+		gFlags.SemiSupernode = true
+		flags.Init(gFlags)
+
+		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
+		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
+		require.NoError(t, err)
+		require.Equal(t, slot, actualEas)
+		semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
+		require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
+
+		// Upgrade to full supernode
+		gFlags.SemiSupernode = false
+		gFlags.Supernode = true
+		flags.Init(gFlags)
+		defer flags.Init(resetFlags)
+
+		// Should upgrade to full supernode
+		upgradeSlot := slot + 2
+		actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
+		require.NoError(t, err)
+		require.Equal(t, upgradeSlot, actualEas)           // Earliest slot updates when upgrading
+		require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
+	})
+
+	t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
+		service, requirements := minimalTestService(t)
+		err = requirements.db.SaveBlock(ctx, roBlock)
+		require.NoError(t, err)
+
+		// Enable semi-supernode
+		resetFlags := flags.Get()
+		gFlags := new(flags.GlobalFlags)
+		gFlags.SemiSupernode = true
+		flags.Init(gFlags)
+		defer flags.Init(resetFlags)
+
+		// Mock a high custody requirement (simulating many validators)
+		// We need to override the custody requirement calculation
+		// For this test, we'll verify the logic by checking if custodyRequirement > 64
+		// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
+		// This would require a different test setup with actual validators
+		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
+		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
+		require.NoError(t, err)
+		require.Equal(t, slot, actualEas)
+		semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
+		// With low validator requirements (4), should use semi-supernode minimum (64)
+		require.Equal(t, semiSupernodeCustody, actualCgc)
+	})
 }
@@ -106,7 +106,7 @@ type EventFeedWrapper struct {
 	subscribed chan struct{} // this channel is closed once a subscription is made
 }
 
-func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
+func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
 	select {
 	case <-w.subscribed:
 		break // already closed
@@ -116,7 +116,7 @@ func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
 	return w.feed.Subscribe(channel)
 }
 
-func (w *EventFeedWrapper) Send(value interface{}) int {
+func (w *EventFeedWrapper) Send(value any) int {
 	return w.feed.Send(value)
 }
@@ -166,7 +166,7 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
 	indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
 
 	valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
-	for i := 0; i < len(reg); i++ {
+	for i := range reg {
 		r := reg[i]
 		nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
 		if !exists {
beacon-chain/cache/active_balance_test.go (vendored, 4 changes)
@@ -17,7 +17,7 @@ import (
 
 func TestBalanceCache_AddGetBalance(t *testing.T) {
 	blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-	for i := 0; i < len(blockRoots); i++ {
+	for i := range blockRoots {
 		b := make([]byte, 8)
 		binary.LittleEndian.PutUint64(b, uint64(i))
 		blockRoots[i] = b
@@ -61,7 +61,7 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
 
 func TestBalanceCache_BalanceKey(t *testing.T) {
 	blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-	for i := 0; i < len(blockRoots); i++ {
+	for i := range blockRoots {
 		b := make([]byte, 8)
 		binary.LittleEndian.PutUint64(b, uint64(i))
 		blockRoots[i] = b
beacon-chain/cache/committee.go (vendored, 2 changes)
@@ -51,7 +51,7 @@ type CommitteeCache struct {
 }
 
 // committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
-func committeeKeyFn(obj interface{}) (string, error) {
+func committeeKeyFn(obj any) (string, error) {
 	info, ok := obj.(*Committees)
 	if !ok {
 		return "", ErrNotCommittee
beacon-chain/cache/committee_fuzz_test.go (vendored, 6 changes)
@@ -14,7 +14,7 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	c := &Committees{}
 
-	for i := 0; i < 100000; i++ {
+	for range 100000 {
 		fuzzer.Fuzz(c)
 		k, err := committeeKeyFn(c)
 		require.NoError(t, err)
@@ -27,7 +27,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	c := &Committees{}
 
-	for i := 0; i < 100000; i++ {
+	for range 100000 {
 		fuzzer.Fuzz(c)
 		require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
 		_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
@@ -42,7 +42,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	c := &Committees{}
 
-	for i := 0; i < 100000; i++ {
+	for range 100000 {
 		fuzzer.Fuzz(c)
 		require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
beacon-chain/cache/common.go (vendored, 2 changes)
@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
 }
 
 // popProcessNoopFunc is a no-op function that never returns an error.
-func popProcessNoopFunc(_ interface{}, _ bool) error {
+func popProcessNoopFunc(_ any, _ bool) error {
 	return nil
 }

@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
 	}
 
 	var ctrs []*ethpb.DepositContainer
-	for i := 0; i < 2000; i++ {
+	for i := range 2000 {
 		ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
 	}

@@ -948,9 +948,9 @@ func rootCreator(rn byte) []byte {
 func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
 	totalDeposits := 10000
 	input := bytesutil.ToBytes32([]byte("foo"))
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		dt := NewDepositTree()
-		for j := 0; j < totalDeposits; j++ {
+		for range totalDeposits {
 			err := dt.Insert(input[:], 0)
 			require.NoError(b, err)
 		}
@@ -959,10 +959,10 @@ func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
 func BenchmarkDepositTree_InsertOldImplementation(b *testing.B) {
 	totalDeposits := 10000
 	input := bytesutil.ToBytes32([]byte("foo"))
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		dt, err := trie.NewTrie(33)
 		require.NoError(b, err)
-		for j := 0; j < totalDeposits; j++ {
+		for range totalDeposits {
 			err := dt.Insert(input[:], 0)
 			require.NoError(b, err)
 		}
@@ -980,8 +980,8 @@ func BenchmarkDepositTree_HashTreeRootNewImplementation(b *testing.B) {
 	}
 
 	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		_, err = tr.HashTreeRoot()
 		require.NoError(b, err)
 	}
@@ -999,8 +999,8 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
 	}
 
 	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		_, err = dt.HashTreeRoot()
 		require.NoError(b, err)
 	}

@@ -20,7 +20,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
 	size := ds.depositCount
 	index := len(ds.finalized)
 	root := trie.ZeroHashes[0]
-	for i := 0; i < DepositContractDepth; i++ {
+	for i := range DepositContractDepth {
 		if (size & 1) == 1 {
 			if index == 0 {
 				break
beacon-chain/cache/skip_slot_cache_test.go (vendored, 6 changes)
@@ -47,15 +47,13 @@ func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
 
 	c.Enable()
 	wg := new(sync.WaitGroup)
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		// Get call will only terminate when
 		// it is not longer in progress.
 		obj, err := c.Get(ctx, r)
 		require.NoError(t, err)
 		require.IsNil(t, obj)
-		wg.Done()
-	}()
+	})
 
 	c.MarkNotInProgress(r)
 	wg.Wait()
beacon-chain/cache/sync_committee.go (vendored, 2 changes)
@@ -236,7 +236,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
 // Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
 // The key is the `currentSyncCommitteeRoot` within the field.
 // Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
-func keyFn(obj interface{}) (string, error) {
+func keyFn(obj any) (string, error) {
 	info, ok := obj.(*syncCommitteeIndexPosition)
 	if !ok {
 		return "", errNotSyncCommitteeIndexPosition
beacon-chain/cache/sync_subnet_ids_test.go (vendored, 8 changes)
@@ -12,12 +12,12 @@ import (
 func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
 	c := newSyncSubnetIDs()
 
-	for i := 0; i < 20; i++ {
+	for i := range 20 {
 		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
 		c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
 	}
 
-	for i := uint64(0); i < 20; i++ {
+	for i := range uint64(20) {
 		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
 
 		idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
@@ -34,7 +34,7 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
 func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
 	c := newSyncSubnetIDs()
 
-	for i := 0; i < 20; i++ {
+	for i := range 20 {
 		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
 		c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
 	}
@@ -42,7 +42,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
 	coms := c.GetAllSubnets(50)
 	assert.Equal(t, 0, len(coms))
 
-	for i := uint64(0); i < 20; i++ {
+	for i := range uint64(20) {
 		pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
 
 		_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
@@ -461,7 +461,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
 	fuzzer := gofuzz.NewWithSeed(0)
 	st := &ethpb.BeaconStateAltair{}
 	b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
-	for i := 0; i < 10000; i++ {
+	for i := range 10000 {
 		fuzzer.Fuzz(st)
 		fuzzer.Fuzz(b)
 		if b.Block == nil {
@@ -240,7 +240,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
 	proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
 	require.NoError(t, err)
 
-	for i := 0; i < len(syncBits); i++ {
+	for i := range syncBits {
 		if syncBits.BitAt(uint64(i)) {
 			pk := bytesutil.ToBytes48(committeeKeys[i])
 			require.DeepEqual(t, true, votedMap[pk])
@@ -195,10 +195,7 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
 // withdrawable_epoch=FAR_FUTURE_EPOCH,
 // )
 func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
-	effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
-	if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
-		effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
-	}
+	effectiveBalance := min(params.BeaconConfig().MaxEffectiveBalance, amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement))
 
 	return &ethpb.Validator{
 		PublicKey: pubKey,
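The `minmax` analyzer folds clamp-style if/assignment chains like the one above (and the `ProcessInactivityScores` change further down) into the built-in `min` and `max` functions from Go 1.21. A minimal sketch of the equivalence, with an illustrative cap value:

```go
package main

import "fmt"

func main() {
	const maxEffective = 32_000_000_000 // illustrative cap (32 ETH in Gwei)
	balance := uint64(40_000_000_000)

	// Old clamp pattern.
	clamped := balance
	if maxEffective < clamped {
		clamped = maxEffective
	}

	// Go 1.21+ built-in: same result, one expression.
	clamped2 := min(uint64(maxEffective), balance)

	fmt.Println(clamped, clamped2) // 32000000000 32000000000
}
```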
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
deposit := ðpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
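The `ctx := t.Context()` context lines reflect the `testingcontext` modernizer, which replaces `context.Background()` in tests with the test-scoped context added in Go 1.24. A sketch (test name and body are illustrative):

```go
package example

import "testing"

// TestWithContext shows the t.Context() form these diffs adopt: the returned
// context is canceled shortly before the test and its cleanup functions
// finish, so goroutines started with it do not outlive the test.
func TestWithContext(t *testing.T) {
	ctx := t.Context() // Go 1.24+; replaces context.Background() in tests
	select {
	case <-ctx.Done():
		t.Fatal("context should not be done while the test is running")
	default:
	}
}
```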
@@ -56,7 +56,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
    deposit := &ethpb.Deposit{}
    ctx := t.Context()

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(deposit)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -74,7 +74,7 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    deposit := &ethpb.Deposit{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(deposit)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -92,7 +92,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
    state := &ethpb.BeaconStateAltair{}
    deposit := &ethpb.Deposit{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(deposit)
        s, err := state_native.InitializeFromProtoUnsafeAltair(state)

@@ -122,11 +122,8 @@ func ProcessInactivityScores(
        }

        if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
-            score := recoveryRate
-            // Prevents underflow below 0.
-            if score > v.InactivityScore {
-                score = v.InactivityScore
-            }
+            score := min(recoveryRate, v.InactivityScore)
            v.InactivityScore -= score
        }
        inactivityScores[i] = v.InactivityScore
@@ -242,7 +239,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
    }

    balances := beaconState.Balances()
-    for i := 0; i < numOfVals; i++ {
+    for i := range numOfVals {
        vals[i].BeforeEpochTransitionBalance = balances[i]

        // Compute the post balance of the validator after accounting for the

@@ -21,7 +21,7 @@ import (
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
    getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
        validators := make([]*ethpb.Validator, count)
-        for i := 0; i < len(validators); i++ {
+        for i := range validators {
            validators[i] = &ethpb.Validator{
                ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
                EffectiveBalance: params.BeaconConfig().MinDepositAmount,
@@ -113,7 +113,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
    helpers.ClearCache()
    getState := func(t *testing.T, count uint64) state.BeaconState {
        validators := make([]*ethpb.Validator, count)
-        for i := 0; i < len(validators); i++ {
+        for i := range validators {
            validators[i] = &ethpb.Validator{
                ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
                EffectiveBalance: params.BeaconConfig().MinDepositAmount,
@@ -147,7 +147,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
func TestSyncCommittee_CanGet(t *testing.T) {
    getState := func(t *testing.T, count uint64) state.BeaconState {
        validators := make([]*ethpb.Validator, count)
-        for i := 0; i < len(validators); i++ {
+        for i := range validators {
            blsKey, err := bls.RandKey()
            require.NoError(t, err)
            validators[i] = &ethpb.Validator{
@@ -394,7 +394,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {

func getState(t *testing.T, count uint64) state.BeaconState {
    validators := make([]*ethpb.Validator, count)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        blsKey, err := bls.RandKey()
        require.NoError(t, err)
        validators[i] = &ethpb.Validator{

@@ -33,7 +33,7 @@ func TestTranslateParticipation(t *testing.T) {
    r, err := helpers.BlockRootAtSlot(s, 0)
    require.NoError(t, err)
    var pendingAtts []*ethpb.PendingAttestation
-    for i := 0; i < 3; i++ {
+    for i := range 3 {
        pendingAtts = append(pendingAtts, &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                CommitteeIndex: primitives.CommitteeIndex(i),

@@ -257,7 +257,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
    }
    indices := indexedAtt.GetAttestingIndices()
    var pubkeys []bls.PublicKey
-    for i := 0; i < len(indices); i++ {
+    for i := range indices {
        pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
        pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
        if err != nil {

@@ -317,7 +317,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
func TestConvertToIndexed_OK(t *testing.T) {
    helpers.ClearCache()
    validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -373,7 +373,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
    validators := make([]*ethpb.Validator, numOfValidators)
    _, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
    require.NoError(t, err)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            PublicKey: keys[i].PublicKey().Marshal(),
@@ -481,7 +481,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
    sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
    list := bitfield.Bitlist{0b11111}
    var atts []ethpb.Att
-    for i := uint64(0); i < 1000; i++ {
+    for range uint64(1000) {
        atts = append(atts, &ethpb.Attestation{
            Data: &ethpb.AttestationData{
                CommitteeIndex: 1,
@@ -498,7 +498,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {

    atts = []ethpb.Att{}
    list = bitfield.Bitlist{0b10000}
-    for i := uint64(0); i < 1000; i++ {
+    for range uint64(1000) {
        atts = append(atts, &ethpb.Attestation{
            Data: &ethpb.AttestationData{
                CommitteeIndex: 1,
@@ -524,7 +524,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
    validators := make([]*ethpb.Validator, numOfValidators)
    _, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
    require.NoError(t, err)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            PublicKey: keys[i].PublicKey().Marshal(),
@@ -588,7 +588,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
    validators := make([]*ethpb.Validator, numOfValidators)
    _, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
    require.NoError(t, err)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            PublicKey: keys[i].PublicKey().Marshal(),
@@ -707,7 +707,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
    validators := make([]*ethpb.Validator, numOfValidators)
    _, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
    require.NoError(t, err)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            PublicKey: keys[i].PublicKey().Marshal(),

@@ -21,7 +21,7 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    att := &ethpb.Attestation{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(att)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -37,7 +37,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    block := &ethpb.SignedBeaconBlock{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(block)

@@ -63,7 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
    var p []byte
    var s []byte
    var d []byte
-    for i := 0; i < 10000; i++ {
+    for range 10000 {
        fuzzer.Fuzz(&ba)
        fuzzer.Fuzz(&pubkey)
        fuzzer.Fuzz(&sig)
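When the loop variable is unused, as in the fuzz loop above, the `rangeint` rewrite drops it entirely and ranges over the count alone. A standalone sketch:

```go
package main

import "fmt"

func main() {
	count := 0
	// Before: for i := 0; i < 5; i++ { ... } with i never read.
	// After (Go 1.22+): no loop variable is declared at all.
	for range 5 {
		count++
	}
	fmt.Println(count) // 5
}
```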
@@ -83,7 +83,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
    e := &ethpb.Eth1Data{}
    state, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
    require.NoError(t, err)
-    for i := 0; i < 10000; i++ {
+    for range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(e)
        s, err := ProcessEth1DataInBlock(t.Context(), state, e)
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
    eth1data := &ethpb.Eth1Data{}
    eth1data2 := &ethpb.Eth1Data{}

-    for i := 0; i < 10000; i++ {
+    for range 10000 {
        fuzzer.Fuzz(eth1data)
        fuzzer.Fuzz(eth1data2)
        AreEth1DataEqual(eth1data, eth1data2)
@@ -110,7 +110,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0)
    eth1data := &ethpb.Eth1Data{}
    var stateVotes []*ethpb.Eth1Data
-    for i := 0; i < 100000; i++ {
+    for i := range 100000 {
        fuzzer.Fuzz(eth1data)
        fuzzer.Fuzz(&stateVotes)
        s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -129,7 +129,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    block := &ethpb.BeaconBlock{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(block)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -145,7 +145,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    b := &ethpb.SignedBeaconBlock{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(b)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -168,7 +168,7 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    blockBody := &ethpb.BeaconBlockBody{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(blockBody)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -186,7 +186,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    p := &ethpb.ProposerSlashing{}
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(p)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -203,7 +203,7 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconState{}
    proposerSlashing := &ethpb.ProposerSlashing{}
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(proposerSlashing)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -219,7 +219,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    a := &ethpb.AttesterSlashing{}
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(a)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -237,7 +237,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    attesterSlashing := &ethpb.AttesterSlashing{}
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(attesterSlashing)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -253,7 +253,7 @@ func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
    attestationData := &ethpb.AttestationData{}
    attestationData2 := &ethpb.AttestationData{}

-    for i := 0; i < 10000; i++ {
+    for range 10000 {
        fuzzer.Fuzz(attestationData)
        fuzzer.Fuzz(attestationData2)
        IsSlashableAttestationData(attestationData, attestationData2)
@@ -264,7 +264,7 @@ func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0)
    attesterSlashing := &ethpb.AttesterSlashing{}

-    for i := 0; i < 10000; i++ {
+    for range 10000 {
        fuzzer.Fuzz(attesterSlashing)
        SlashableAttesterIndices(attesterSlashing)
    }
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    b := &ethpb.SignedBeaconBlock{}
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(b)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -298,7 +298,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    idxAttestation := &ethpb.IndexedAttestation{}
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(idxAttestation)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -313,7 +313,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconState{}
    deposit := &ethpb.Deposit{}
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(deposit)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -329,7 +329,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
    state := &ethpb.BeaconState{}
    e := &ethpb.SignedVoluntaryExit{}
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(e)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -346,7 +346,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0)
    state := &ethpb.BeaconState{}
    e := &ethpb.SignedVoluntaryExit{}
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(e)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -366,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
    fork := &ethpb.Fork{}
    var slot primitives.Slot

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(ve)
        fuzzer.Fuzz(rawVal)
        fuzzer.Fuzz(fork)

@@ -19,7 +19,7 @@ import (

func FakeDeposits(n uint64) []*ethpb.Eth1Data {
    deposits := make([]*ethpb.Eth1Data, n)
-    for i := uint64(0); i < n; i++ {
+    for i := range n {
        deposits[i] = &ethpb.Eth1Data{
            DepositCount: 1,
            DepositRoot:  bytesutil.PadTo([]byte("root"), 32),
@@ -175,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
    }

    period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
-    for i := uint64(0); i < period; i++ {
+    for range period {
        processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data)
        require.NoError(t, err)
        require.Equal(t, true, processedState.Version() == version.Phase0)

@@ -27,7 +27,7 @@ func init() {

func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            PublicKey:             make([]byte, 32),
            WithdrawalCredentials: make([]byte, 32),
@@ -104,7 +104,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {

func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            PublicKey:             make([]byte, 32),
            WithdrawalCredentials: make([]byte, 32),
@@ -148,7 +148,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {

func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            PublicKey:             make([]byte, 48),
            WithdrawalCredentials: make([]byte, 32),
@@ -189,7 +189,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {

func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            PublicKey:             make([]byte, 48),
            WithdrawalCredentials: make([]byte, 32),
@@ -233,7 +233,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {

func TestProcessBlockHeader_OK(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            PublicKey:             make([]byte, 32),
            WithdrawalCredentials: make([]byte, 32),
@@ -293,7 +293,7 @@ func TestProcessBlockHeader_OK(t *testing.T) {

func TestBlockSignatureSet_OK(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            PublicKey:             make([]byte, 32),
            WithdrawalCredentials: make([]byte, 32),

@@ -90,6 +90,9 @@ func IsExecutionEnabled(st state.ReadOnlyBeaconState, body interfaces.ReadOnlyBe
    if st == nil || body == nil {
        return false, errors.New("nil state or block body")
    }
+    if st.Version() >= version.Capella {
+        return true, nil
+    }
    if IsPreBellatrixVersion(st.Version()) {
        return false, nil
    }

@@ -260,11 +260,12 @@ func Test_IsExecutionBlockCapella(t *testing.T) {

func Test_IsExecutionEnabled(t *testing.T) {
    tests := []struct {
-        name        string
-        payload     *enginev1.ExecutionPayload
-        header      interfaces.ExecutionData
-        useAltairSt bool
-        want        bool
+        name         string
+        payload      *enginev1.ExecutionPayload
+        header       interfaces.ExecutionData
+        useAltairSt  bool
+        useCapellaSt bool
+        want         bool
    }{
        {
            name: "use older than bellatrix state",
@@ -331,6 +332,17 @@ func Test_IsExecutionEnabled(t *testing.T) {
            }(),
            want: true,
        },
+        {
+            name:    "capella state always enabled",
+            payload: emptyPayload(),
+            header: func() interfaces.ExecutionData {
+                h, err := emptyPayloadHeader()
+                require.NoError(t, err)
+                return h
+            }(),
+            useCapellaSt: true,
+            want:         true,
+        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
@@ -342,6 +354,8 @@ func Test_IsExecutionEnabled(t *testing.T) {
            require.NoError(t, err)
            if tt.useAltairSt {
                st, _ = util.DeterministicGenesisStateAltair(t, 1)
+            } else if tt.useCapellaSt {
+                st, _ = util.DeterministicGenesisStateCapella(t, 1)
            }
            got, err := blocks.IsExecutionEnabled(st, body)
            require.NoError(t, err)
@@ -851,8 +865,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
    require.NoError(b, err)
    require.NoError(b, st.SetLatestExecutionPayloadHeader(h))

-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
        _, err := blocks.IsMergeTransitionComplete(st)
        require.NoError(b, err)
    }
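The `bloop` modernizer rewrites classic `b.N` benchmark loops to `testing.B.Loop` (Go 1.24), which is why the `b.ResetTimer()` call is deleted in the same hunk: setup before the loop is excluded from timing automatically. A minimal sketch (benchmark name and body are illustrative):

```go
package example

import "testing"

func BenchmarkSum(b *testing.B) {
	nums := []int{1, 2, 3, 4} // setup, not timed
	// Go 1.24+: b.Loop() replaces `for i := 0; i < b.N; i++`. It also keeps
	// the compiler from optimizing the loop body away entirely.
	for b.Loop() {
		s := 0
		for _, n := range nums {
			s += n
		}
		_ = s
	}
}
```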
@@ -28,7 +28,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
            ActivationEpoch:       primitives.Epoch(0),
            EffectiveBalance:      params.BeaconConfig().MinActivationBalance,
            ExitEpoch:             params.BeaconConfig().FarFutureEpoch,
-            PublicKey:             []byte(fmt.Sprintf("val_%d", i)),
+            PublicKey:             fmt.Appendf(nil, "val_%d", i),
            WithdrawableEpoch:     params.BeaconConfig().FarFutureEpoch,
            WithdrawalCredentials: wd,
        }
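The `fmtappendf` modernizer turns `[]byte(fmt.Sprintf(...))` into `fmt.Appendf(nil, ...)` (Go 1.19+), which formats directly into a byte slice and skips the intermediate string allocation. A sketch of the equivalence:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Before: two allocations (the string, then the []byte conversion).
	a := []byte(fmt.Sprintf("val_%d", 7))
	// After: appends the formatted bytes straight onto the destination slice.
	b := fmt.Appendf(nil, "val_%d", 7)
	fmt.Println(bytes.Equal(a, b)) // true
}
```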
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
    state := &ethpb.BeaconStateElectra{}
    deposits := make([]*ethpb.Deposit, 100)
    ctx := t.Context()
-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        for i := range deposits {
            fuzzer.Fuzz(deposits[i])
@@ -36,7 +36,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
    state := &ethpb.BeaconStateElectra{}
    deposit := &ethpb.Deposit{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(state)
        fuzzer.Fuzz(deposit)
        s, err := state_native.InitializeFromProtoUnsafeElectra(state)

@@ -95,7 +95,7 @@ func TestProcessPendingDeposits(t *testing.T) {
                require.NoError(t, err)
                require.Equal(t, primitives.Gwei(100), res)
                // Validators 0..9 should have their balance increased
-                for i := primitives.ValidatorIndex(0); i < 10; i++ {
+                for i := range primitives.ValidatorIndex(10) {
                    b, err := st.BalanceAtIndex(i)
                    require.NoError(t, err)
                    require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
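Range-over-integer also accepts named integer types such as `primitives.ValidatorIndex`, and the loop variable keeps that type, so no conversion is needed at call sites like `BalanceAtIndex(i)`. A sketch with a hypothetical local type standing in for the Prysm primitive:

```go
package main

import "fmt"

// ValidatorIndex stands in for primitives.ValidatorIndex (a uint64-based
// type) purely for illustration.
type ValidatorIndex uint64

func main() {
	// i has type ValidatorIndex and ranges over 0..9, so APIs that take the
	// named type can be called without an explicit conversion.
	for i := range ValidatorIndex(10) {
		_ = i
	}
	fmt.Println("done")
}
```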
@@ -122,7 +122,7 @@ func TestProcessPendingDeposits(t *testing.T) {
            check: func(t *testing.T, st state.BeaconState) {
                amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
                // Validators 0..9 should have their balance increased
-                for i := primitives.ValidatorIndex(0); i < 2; i++ {
+                for i := range primitives.ValidatorIndex(2) {
                    b, err := st.BalanceAtIndex(i)
                    require.NoError(t, err)
                    require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
@@ -149,7 +149,7 @@ func TestProcessPendingDeposits(t *testing.T) {
                require.NoError(t, err)
                require.Equal(t, primitives.Gwei(0), res)
                // Validators 0..4 should have their balance increased
-                for i := primitives.ValidatorIndex(0); i < 4; i++ {
+                for i := range primitives.ValidatorIndex(4) {
                    b, err := st.BalanceAtIndex(i)
                    require.NoError(t, err)
                    require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
@@ -528,7 +528,7 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {

    vals := make([]*eth.Validator, numVals)
    bals := make([]uint64, numVals)
-    for i := uint64(0); i < numVals; i++ {
+    for i := range numVals {
        wc := make([]byte, 32)
        wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
        wc[31] = byte(i)

@@ -56,7 +56,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
                Slot:                5 * params.BeaconConfig().SlotsPerEpoch,
                FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
            }
-            for i := uint64(0); i < 10; i++ {
+            for range uint64(10) {
                base.Validators = append(base.Validators, &eth.Validator{
                    ActivationEligibilityEpoch: finalizedEpoch,
                    EffectiveBalance:           params.BeaconConfig().MaxEffectiveBalance,
@@ -82,7 +82,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
                Slot:                5 * params.BeaconConfig().SlotsPerEpoch,
                FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
            }
-            for i := uint64(0); i < 10; i++ {
+            for range uint64(10) {
                base.Validators = append(base.Validators, &eth.Validator{
                    EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
                    ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
@@ -108,7 +108,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
                Slot:                5 * params.BeaconConfig().SlotsPerEpoch,
                FinalizedCheckpoint: &eth.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
            }
-            for i := uint64(0); i < 10; i++ {
+            for range uint64(10) {
                base.Validators = append(base.Validators, &eth.Validator{
                    EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
                    ExitEpoch:        10,
@@ -157,7 +157,7 @@ func Benchmark_ProcessRegistryUpdates_MassEjection(b *testing.B) {
    st, err := util.NewBeaconStateElectra()
    require.NoError(b, err)

-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
        b.StopTimer()
        if err := st.SetValidators(genValidators(100000)); err != nil {
            panic(err)

@@ -329,10 +329,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, er
        balance := bals[idx]

        if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
-            effectiveBal := maxEffBalance
-            if effectiveBal > balance-balance%effBalanceInc {
-                effectiveBal = balance - balance%effBalanceInc
-            }
+            effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
            if effectiveBal != val.EffectiveBalance() {
                newVal = val.Copy()
                newVal.EffectiveBalance = effectiveBal

@@ -14,7 +14,7 @@ func TestFuzzFinalUpdates_10000(t *testing.T) {
    fuzzer := gofuzz.NewWithSeed(0)
    base := &ethpb.BeaconState{}

-    for i := 0; i < 10000; i++ {
+    for i := range 10000 {
        fuzzer.Fuzz(base)
        s, err := state_native.InitializeFromProtoUnsafePhase0(base)
        require.NoError(t, err)

@@ -218,7 +218,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
    cfg.ChurnLimitQuotient = 1
    params.OverrideBeaconConfig(cfg)

-    for i := uint64(0); i < 10; i++ {
+    for range uint64(10) {
        base.Validators = append(base.Validators, &ethpb.Validator{
            ActivationEligibilityEpoch: finalizedEpoch,
            EffectiveBalance:           params.BeaconConfig().MaxEffectiveBalance,
@@ -314,28 +314,28 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {

func buildState(t testing.TB, slot primitives.Slot, validatorCount uint64) state.BeaconState {
    validators := make([]*ethpb.Validator, validatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
        }
    }
    validatorBalances := make([]uint64, len(validators))
-    for i := 0; i < len(validatorBalances); i++ {
+    for i := range validatorBalances {
        validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
    }
    latestActiveIndexRoots := make(
        [][]byte,
        params.BeaconConfig().EpochsPerHistoricalVector,
    )
-    for i := 0; i < len(latestActiveIndexRoots); i++ {
+    for i := range latestActiveIndexRoots {
        latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
    }
    latestRandaoMixes := make(
        [][]byte,
        params.BeaconConfig().EpochsPerHistoricalVector,
    )
-    for i := 0; i < len(latestRandaoMixes); i++ {
+    for i := range latestRandaoMixes {
        latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
    }
    s, err := util.NewBeaconState()
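The `buildState` rewrites above use the older range-over-slice index form rather than range-over-int: where a loop only needs the index, `for i := range slice` replaces `for i := 0; i < len(slice); i++` and has been idiomatic Go since long before 1.22. A sketch:

```go
package main

import "fmt"

func main() {
	roots := make([][]byte, 4)
	// Equivalent to: for i := 0; i < len(roots); i++ { ... }
	for i := range roots {
		roots[i] = []byte{byte(i)}
	}
	fmt.Println(len(roots)) // 4
}
```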
@@ -19,7 +19,7 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
    e := params.BeaconConfig().FarFutureEpoch
    a := params.BeaconConfig().MaxEffectiveBalance
    blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
-    for i := 0; i < len(blockRoots); i++ {
+    for i := range blockRoots {
        blockRoots[i] = []byte{byte(i)}
    }
    base := &ethpb.BeaconState{
@@ -56,7 +56,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
    e := params.BeaconConfig().FarFutureEpoch
    a := params.BeaconConfig().MaxEffectiveBalance
    blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
-    for i := 0; i < len(blockRoots); i++ {
+    for i := range blockRoots {
        blockRoots[i] = []byte{byte(i)}
    }
    base := &ethpb.BeaconState{
@@ -93,7 +93,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
    e := params.BeaconConfig().FarFutureEpoch
    a := params.BeaconConfig().MaxEffectiveBalance
    blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
-    for i := 0; i < len(blockRoots); i++ {
+    for i := range blockRoots {
        blockRoots[i] = []byte{byte(i)}
    }
    base := &ethpb.BeaconState{
@@ -128,7 +128,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
func TestUnrealizedCheckpoints(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
    balances := make([]uint64, len(validators))
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,

@@ -42,7 +42,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
        return nil, errors.Wrap(err, "could not get proposer attestation delta")
    }
    validatorBals := state.Balances()
-    for i := 0; i < numOfVals; i++ {
+    for i := range numOfVals {
        vp[i].BeforeEpochTransitionBalance = validatorBals[i]

        // Compute the post balance of the validator after accounting for the

@@ -24,7 +24,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
    validatorCount := uint64(2048)
    base := buildState(e+3, validatorCount)
    atts := make([]*ethpb.PendingAttestation, 3)
-    for i := 0; i < len(atts); i++ {
+    for i := range atts {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -63,7 +63,7 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
    base := buildState(e+2, validatorCount)
    atts := make([]*ethpb.PendingAttestation, 3)
    var emptyRoot [32]byte
-    for i := 0; i < len(atts); i++ {
+    for i := range atts {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Target: &ethpb.Checkpoint{
@@ -99,7 +99,7 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) {
    base := buildState(e+2, validatorCount)
    atts := make([]*ethpb.PendingAttestation, 3)
    var emptyRoot [32]byte
-    for i := 0; i < len(atts); i++ {
+    for i := range atts {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Target: &ethpb.Checkpoint{
@@ -131,7 +131,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
    validatorCount := uint64(2048)
    base := buildState(e+3, validatorCount)
    atts := make([]*ethpb.PendingAttestation, 3)
-    for i := 0; i < len(atts); i++ {
+    for i := range atts {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -176,28 +176,28 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.

func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
    validators := make([]*ethpb.Validator, validatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
        }
    }
    validatorBalances := make([]uint64, len(validators))
-    for i := 0; i < len(validatorBalances); i++ {
+    for i := range validatorBalances {
        validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
    }
    latestActiveIndexRoots := make(
        [][]byte,
        params.BeaconConfig().EpochsPerHistoricalVector,
    )
-    for i := 0; i < len(latestActiveIndexRoots); i++ {
+    for i := range latestActiveIndexRoots {
        latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
    }
    latestRandaoMixes := make(
        [][]byte,
        params.BeaconConfig().EpochsPerHistoricalVector,
    )
-    for i := 0; i < len(latestRandaoMixes); i++ {
+    for i := range latestRandaoMixes {
        latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
    }
    return &ethpb.BeaconState{

@@ -17,5 +17,5 @@ type Event struct {
    // Type is the type of event.
    Type EventType
    // Data is event-specific data.
-    Data interface{}
+    Data any
}

@@ -54,7 +54,7 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
    validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
    validators := make([]*ethpb.Validator, validatorCount)

-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        k := make([]byte, 48)
        copy(k, strconv.Itoa(i))
        validators[i] = &ethpb.Validator{

@@ -5,7 +5,7 @@ package helpers
import (
    "context"
    "fmt"
-    "sort"
+    "slices"

    "github.com/OffchainLabs/go-bitfield"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
@@ -515,9 +515,7 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
    // used for failing verify signature fallback.
    sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
    copy(sortedIndices, shuffledIndices)
-    sort.Slice(sortedIndices, func(i, j int) bool {
-        return sortedIndices[i] < sortedIndices[j]
-    })
+    slices.Sort(sortedIndices)

    if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
        ShuffledIndices: shuffledIndices,
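The `slicessort` modernizer replaces closure-based `sort.Slice` with the generic `slices.Sort` (Go 1.21+) whenever the elements are naturally ordered, which also swaps the `"sort"` import for `"slices"` in the hunk above. A sketch of the equivalence:

```go
package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []uint64{5, 2, 9, 1}
	b := slices.Clone(a)

	// Before: a closure-based comparison.
	sort.Slice(a, func(i, j int) bool { return a[i] < a[j] })
	// After (Go 1.21+): natural ordering for any cmp.Ordered element type,
	// with no comparison closure.
	slices.Sort(b)

	fmt.Println(slices.Equal(a, b)) // true
}
```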
@@ -29,7 +29,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
    validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
    validators := make([]*ethpb.Validator, validatorCount)

-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        k := make([]byte, 48)
        copy(k, strconv.Itoa(i))
        validators[i] = &ethpb.Validator{
@@ -122,7 +122,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        var activationEpoch primitives.Epoch
        if i >= len(validators)/2 {
            activationEpoch = 3
@@ -151,7 +151,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
    validatorIndices := make([]primitives.ValidatorIndex, len(validators))
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        // First 2 epochs only half validators are activated.
        var activationEpoch primitives.Epoch
        if i >= len(validators)/2 {
@@ -234,7 +234,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {

    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        // First 2 epochs only half validators are activated.
        var activationEpoch primitives.Epoch
        if i >= len(validators)/2 {
@@ -266,7 +266,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t

    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -287,7 +287,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {

    // Initialize test with 256 validators, each slot and each index gets 4 validators.
    validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ActivationEpoch: 0,
            ExitEpoch:       params.BeaconConfig().FarFutureEpoch,
@@ -323,7 +323,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
    validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
    activeRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -489,7 +489,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {

func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
    validators := make([]*ethpb.Validator, 300000)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -512,8 +512,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
        panic(err)
    }

-    b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
@@ -523,7 +522,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {

func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
    validators := make([]*ethpb.Validator, 3000000)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -546,8 +545,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
        panic(err)
    }

-    b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
            panic(err)
@@ -557,7 +555,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {

func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
    validators := make([]*ethpb.Validator, 128000)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -576,8 +574,8 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {

    i := uint64(0)
    index := uint64(0)
-    b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+
+    for b.Loop() {
        i++
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
@@ -592,7 +590,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {

func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
    validators := make([]*ethpb.Validator, 1000000)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -611,8 +609,8 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {

    i := uint64(0)
    index := uint64(0)
-    b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+
+    for b.Loop() {
        i++
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
@@ -627,7 +625,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {

func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
    validators := make([]*ethpb.Validator, 4000000)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -646,8 +644,8 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {

    i := uint64(0)
    index := uint64(0)
-    b.ResetTimer()
-    for n := 0; n < b.N; n++ {
+
+    for b.Loop() {
        i++
        _, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
        if err != nil {
@@ -663,7 +661,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
    committeeSize := uint64(16)
    validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(committeeSize))
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -688,7 +686,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {

func TestPrecomputeProposerIndices_Ok(t *testing.T) {
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -732,7 +730,7 @@ func TestAttestationCommitteesFromState(t *testing.T) {
    ctx := t.Context()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -768,7 +766,7 @@ func TestAttestationCommitteesFromCache(t *testing.T) {
    ctx := t.Context()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
@@ -934,7 +932,7 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
    proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, state, epoch)
    require.NoError(t, err)
    slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
-    for epochOffset := primitives.Epoch(0); epochOffset < 2; epochOffset++ {
+    for epochOffset := range primitives.Epoch(2) {
        targetEpoch := epoch + epochOffset

        activeIndices, err := helpers.ActiveValidatorIndices(ctx, state, targetEpoch)

@@ -16,7 +16,7 @@ import (

func TestRandaoMix_OK(t *testing.T) {
    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-    for i := 0; i < len(randaoMixes); i++ {
+    for i := range randaoMixes {
        intInBytes := make([]byte, 32)
        binary.LittleEndian.PutUint64(intInBytes, uint64(i))
        randaoMixes[i] = intInBytes
@@ -52,7 +52,7 @@ func TestRandaoMix_OK(t *testing.T) {

func TestRandaoMix_CopyOK(t *testing.T) {
    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-    for i := 0; i < len(randaoMixes); i++ {
+    for i := range randaoMixes {
        intInBytes := make([]byte, 32)
        binary.LittleEndian.PutUint64(intInBytes, uint64(i))
        randaoMixes[i] = intInBytes
@@ -96,7 +96,7 @@ func TestGenerateSeed_OK(t *testing.T) {
    helpers.ClearCache()

    randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-    for i := 0; i < len(randaoMixes); i++ {
+    for i := range randaoMixes {
        intInBytes := make([]byte, 32)
        binary.LittleEndian.PutUint64(intInBytes, uint64(i))
        randaoMixes[i] = intInBytes

@@ -239,28 +239,28 @@ func TestIsInInactivityLeak(t *testing.T) {

func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
    validators := make([]*ethpb.Validator, validatorCount)
-    for i := 0; i < len(validators); i++ {
+    for i := range validators {
        validators[i] = &ethpb.Validator{
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
        }
    }
    validatorBalances := make([]uint64, len(validators))
-    for i := 0; i < len(validatorBalances); i++ {
+    for i := range validatorBalances {
        validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
    }
    latestActiveIndexRoots := make(
        [][]byte,
        params.BeaconConfig().EpochsPerHistoricalVector,
    )
-    for i := 0; i < len(latestActiveIndexRoots); i++ {
+    for i := range latestActiveIndexRoots {
        latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
    }
    latestRandaoMixes := make(
        [][]byte,
        params.BeaconConfig().EpochsPerHistoricalVector,
    )
-    for i := 0; i < len(latestRandaoMixes); i++ {
+    for i := range latestRandaoMixes {
        latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
    }
    return &ethpb.BeaconState{

@@ -23,7 +23,7 @@ var maxShuffleListSize uint64 = 1 << 40
func SplitIndices(l []uint64, n uint64) [][]uint64 {
    var divided [][]uint64
    var lSize = uint64(len(l))
-    for i := uint64(0); i < n; i++ {
+    for i := range n {
        start := slice.SplitOffset(lSize, n, i)
        end := slice.SplitOffset(lSize, n, i+1)
        divided = append(divided, l[start:end])
@@ -103,10 +103,7 @@ func ComputeShuffledIndex(index primitives.ValidatorIndex, indexCount uint64, se
        pivot := hash8Int % indexCount
        flip := (pivot + indexCount - uint64(index)) % indexCount
        // Consider every pair only once by picking the highest pair index to retrieve randomness.
-        position := uint64(index)
-        if flip > position {
-            position = flip
-        }
+        position := max(flip, uint64(index))
        // Add position except its last byte to []buf for randomness,
        // it will be used later to select a bit from the resulting hash.
        binary.LittleEndian.PutUint64(posBuffer[:8], position>>8)
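The same `minmax` rewrite applies to maxima: picking the higher of `flip` and `index` collapses to the `max` built-in (Go 1.21). A sketch:

```go
package main

import "fmt"

func main() {
	index, flip := uint64(3), uint64(11)

	// Before:
	//   position := index
	//   if flip > position { position = flip }
	// After (Go 1.21+):
	position := max(flip, index)

	fmt.Println(position) // 11
}
```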
@@ -30,7 +30,7 @@ func TestShuffleList_OK(t *testing.T) {
    var list1 []primitives.ValidatorIndex
    seed1 := [32]byte{1, 128, 12}
    seed2 := [32]byte{2, 128, 12}
-    for i := 0; i < 10; i++ {
+    for i := range 10 {
        list1 = append(list1, primitives.ValidatorIndex(i))
    }

@@ -55,7 +55,7 @@ func TestSplitIndices_OK(t *testing.T) {

    var l []uint64
    numValidators := uint64(64000)
-    for i := uint64(0); i < numValidators; i++ {
+    for i := range numValidators {
        l = append(l, i)
    }
    split := SplitIndices(l, uint64(params.BeaconConfig().SlotsPerEpoch))
@@ -104,7 +104,7 @@ func BenchmarkIndexComparison(b *testing.B) {
    seed := [32]byte{123, 42}
    for _, listSize := range listSizes {
        b.Run(fmt.Sprintf("Indexwise_ShuffleList_%d", listSize), func(ib *testing.B) {
-            for i := 0; i < ib.N; i++ {
+            for ib.Loop() {
                // Simulate a list-shuffle by running shuffle-index listSize times.
                for j := primitives.ValidatorIndex(0); uint64(j) < listSize; j++ {
                    _, err := ShuffledIndex(j, listSize, seed)
@@ -120,11 +120,11 @@ func BenchmarkShuffleList(b *testing.B) {
    seed := [32]byte{123, 42}
    for _, listSize := range listSizes {
        testIndices := make([]primitives.ValidatorIndex, listSize)
-        for i := uint64(0); i < listSize; i++ {
+        for i := range listSize {
            testIndices[i] = primitives.ValidatorIndex(i)
        }
        b.Run(fmt.Sprintf("ShuffleList_%d", listSize), func(ib *testing.B) {
-            for i := 0; i < ib.N; i++ {
+            for ib.Loop() {
                _, err := ShuffleList(testIndices, seed)
                assert.NoError(b, err)
            }
@@ -161,12 +161,12 @@ func TestSplitIndicesAndOffset_OK(t *testing.T) {

    var l []uint64
    validators := uint64(64000)
-    for i := uint64(0); i < validators; i++ {
+    for i := range validators {
        l = append(l, i)
    }
    chunks := uint64(6)
    split := SplitIndices(l, chunks)
-    for i := uint64(0); i < chunks; i++ {
+    for i := range chunks {
        if !reflect.DeepEqual(split[i], l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)]) {
            t.Errorf("Want: %v got: %v", l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)], split[i])
            break
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestCurrentPeriodPositions(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -56,7 +56,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -87,7 +87,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -116,7 +116,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -144,7 +144,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -175,7 +175,7 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -203,7 +203,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -231,7 +231,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -262,7 +262,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -304,7 +304,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -332,7 +332,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -363,7 +363,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -391,7 +391,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
@@ -449,7 +449,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
 	syncCommittee := &ethpb.SyncCommittee{
 		AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
 	}
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		k := make([]byte, 48)
 		copy(k, strconv.Itoa(i))
 		validators[i] = &ethpb.Validator{
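These `for` rewrites across the sync committee tests come from the `modernize` analyzers enabled in this change: since Go 1.22, `range` can iterate slice indices and plain integer counts directly. A minimal standalone sketch of the before/after shapes (illustrative only, not Prysm code):

```go
package main

import "fmt"

func main() {
	validators := make([]string, 4)

	// Old style: explicit counter checked against len().
	for i := 0; i < len(validators); i++ {
		validators[i] = fmt.Sprintf("val-%d", i)
	}

	// Go 1.22+: range over the slice when only the index is needed.
	for i := range validators {
		fmt.Println(i, validators[i])
	}

	// Go 1.22+: range over an integer; the loop variable can be dropped
	// entirely ("for range 3") when it is unused.
	for i := range 3 {
		fmt.Println("iteration", i)
	}
}
```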
@@ -184,7 +184,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
 	c.MinGenesisActiveValidatorCount = 16384
 	params.OverrideBeaconConfig(c)
 	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		validators[i] = &ethpb.Validator{
 			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
 		}
@@ -241,7 +241,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
 	c.MinGenesisActiveValidatorCount = 16384
 	params.OverrideBeaconConfig(c)
 	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		validators[i] = &ethpb.Validator{
 			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
 		}
@@ -270,7 +270,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
 	helpers.ClearCache()
 
 	validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		validators[i] = &ethpb.Validator{
 			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
 		}
@@ -322,7 +322,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
 
 	c := 1000
 	validators := make([]*ethpb.Validator, c)
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		validators[i] = &ethpb.Validator{
 			ExitEpoch: params.BeaconConfig().FarFutureEpoch,
 		}
@@ -357,7 +357,7 @@ func TestChurnLimit_OK(t *testing.T) {
 		helpers.ClearCache()
 
 		validators := make([]*ethpb.Validator, test.validatorCount)
-		for i := 0; i < len(validators); i++ {
+		for i := range validators {
 			validators[i] = &ethpb.Validator{
 				ExitEpoch: params.BeaconConfig().FarFutureEpoch,
 			}
@@ -861,7 +861,7 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
 
 	validators := make([]*ethpb.Validator, 4)
 	balances := make([]uint64, len(validators))
-	for i := uint64(0); i < 4; i++ {
+	for i := range uint64(4) {
 		validators[i] = &ethpb.Validator{
 			PublicKey:             make([]byte, params.BeaconConfig().BLSPubkeyLength),
 			WithdrawalCredentials: make([]byte, 32),

@@ -270,7 +270,7 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
 
 	validators := make([]*ethpb.Validator, valCount)
 	balances := make([]uint64, len(validators))
-	for i := uint64(0); i < valCount; i++ {
+	for i := range valCount {
 		validators[i] = &ethpb.Validator{
 			PublicKey:             make([]byte, params.BeaconConfig().BLSPubkeyLength),
 			WithdrawalCredentials: make([]byte, 32),
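The last two hunks show that `range` also accepts typed integers: with `valCount uint64`, `for i := range valCount` yields an `i` of type `uint64`, so no cast is needed at the loop head or at uses expecting `uint64`. A small sketch under the same assumption (standalone, not Prysm code):

```go
package main

import "fmt"

func main() {
	var valCount uint64 = 4

	// i has the type of the range expression, here uint64, matching
	// the old `for i := uint64(0); i < valCount; i++` exactly.
	for i := range valCount {
		fmt.Printf("validator %d has index type %T\n", i, i)
	}
}
```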
@@ -45,12 +45,13 @@ go_test(
         "p2p_interface_test.go",
         "reconstruction_helpers_test.go",
         "reconstruction_test.go",
+        "semi_supernode_test.go",
         "utils_test.go",
         "validator_test.go",
         "verification_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
         ":go_default_library",
         "//beacon-chain/blockchain/kzg:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
         "//config/fieldparams:go_default_library",
@@ -100,7 +100,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
 	// Generate random KZG commitments `blobCount` blobs.
 	kzgCommitments := make([][]byte, blobCount)
 
-	for i := 0; i < blobCount; i++ {
+	for i := range blobCount {
 		kzgCommitments[i] = make([]byte, 48)
 		_, err := rand.Read(kzgCommitments[i])
 		require.NoError(t, err)
@@ -29,6 +29,38 @@ func MinimumColumnCountToReconstruct() uint64 {
 	return (params.BeaconConfig().NumberOfColumns + 1) / 2
 }
 
+// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
+// custody enough data columns for reconstruction. This accounts for the relationship between
+// custody groups and columns, making it future-proof if these values change.
+// Returns an error if the configuration values are invalid (zero or would cause division by zero).
+func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
+	cfg := params.BeaconConfig()
+
+	// Validate configuration values
+	if cfg.NumberOfColumns == 0 {
+		return 0, errors.New("NumberOfColumns cannot be zero")
+	}
+	if cfg.NumberOfCustodyGroups == 0 {
+		return 0, errors.New("NumberOfCustodyGroups cannot be zero")
+	}
+
+	minimumColumnCount := MinimumColumnCountToReconstruct()
+
+	// Calculate how many columns each custody group represents
+	columnsPerGroup := cfg.NumberOfColumns / cfg.NumberOfCustodyGroups
+
+	// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
+	// for reconstruction purposes as we cannot determine a meaningful custody group count
+	if columnsPerGroup == 0 {
+		return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
+			cfg.NumberOfCustodyGroups, cfg.NumberOfColumns)
+	}
+
+	// Use ceiling division to ensure we have enough groups to cover the minimum columns
+	// ceiling(a/b) = (a + b - 1) / b
+	return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
+}
+
 // recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.
 // This is optimized to only recover cells without computing proofs.
 // Returns a map from blob index to recovered cells.
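To make the ceiling arithmetic concrete: with 128 columns and 128 groups each group covers one column, so reconstruction needs ceiling(64/1) = 64 groups; with 128 columns and 32 groups each group covers four columns and ceiling(64/4) = 16 groups suffice. A standalone sketch of the same computation, using plain integers rather than Prysm's config types (hypothetical helper, validation omitted):

```go
package main

import "fmt"

// minimumGroups mirrors the logic above: half the columns (rounded up)
// divided by columns-per-group, again rounded up.
func minimumGroups(numberOfColumns, numberOfCustodyGroups uint64) uint64 {
	minimumColumnCount := (numberOfColumns + 1) / 2
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
	return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup
}

func main() {
	fmt.Println(minimumGroups(128, 128)) // 64
	fmt.Println(minimumGroups(128, 32))  // 16
	fmt.Println(minimumGroups(100, 30))  // ceiling(50/3) = 17
}
```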
@@ -16,11 +16,11 @@ import (
 
 // testBlobSetup holds common test data for blob reconstruction tests.
 type testBlobSetup struct {
-	blobCount int
-	blobs []kzg.Blob
-	roBlock blocks.ROBlock
-	roDataColumnSidecars []blocks.RODataColumn
-	verifiedRoDataColumnSidecars []blocks.VerifiedRODataColumn
+	blobCount                    int
+	blobs                        []kzg.Blob
+	roBlock                      blocks.ROBlock
+	roDataColumnSidecars         []blocks.RODataColumn
+	verifiedRoDataColumnSidecars []blocks.VerifiedRODataColumn
 }
 
 // setupTestBlobs creates a complete test setup with blobs, cells, proofs, and data column sidecars.
beacon-chain/core/peerdas/semi_supernode_test.go (new file, 160 lines)
@@ -0,0 +1,160 @@
+package peerdas
+
+import (
+	"testing"
+
+	"github.com/OffchainLabs/prysm/v7/config/params"
+	"github.com/OffchainLabs/prysm/v7/testing/require"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+)
+
+func TestSemiSupernodeCustody(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	cfg := params.BeaconConfig()
+	cfg.NumberOfCustodyGroups = 128
+	cfg.NumberOfColumns = 128
+	params.OverrideBeaconConfig(cfg)
+
+	// Create a test node ID
+	nodeID := enode.ID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32})
+
+	t.Run("semi-supernode custodies exactly 64 columns", func(t *testing.T) {
+		// Semi-supernode uses 64 custody groups (half of 128)
+		const semiSupernodeCustodyGroupCount = 64
+
+		// Get custody groups for semi-supernode
+		custodyGroups, err := CustodyGroups(nodeID, semiSupernodeCustodyGroupCount)
+		require.NoError(t, err)
+		require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyGroups))
+
+		// Verify we get exactly 64 custody columns
+		custodyColumns, err := CustodyColumns(custodyGroups)
+		require.NoError(t, err)
+		require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyColumns))
+
+		// Verify the columns are valid (within 0-127 range)
+		for columnIndex := range custodyColumns {
+			if columnIndex >= cfg.NumberOfColumns {
+				t.Fatalf("Invalid column index %d, should be less than %d", columnIndex, cfg.NumberOfColumns)
+			}
+		}
+	})
+
+	t.Run("64 columns is exactly the minimum for reconstruction", func(t *testing.T) {
+		minimumCount := MinimumColumnCountToReconstruct()
+		require.Equal(t, uint64(64), minimumCount)
+	})
+
+	t.Run("semi-supernode vs supernode custody", func(t *testing.T) {
+		// Semi-supernode (64 custody groups)
+		semiSupernodeGroups, err := CustodyGroups(nodeID, 64)
+		require.NoError(t, err)
+		semiSupernodeColumns, err := CustodyColumns(semiSupernodeGroups)
+		require.NoError(t, err)
+
+		// Supernode (128 custody groups = all groups)
+		supernodeGroups, err := CustodyGroups(nodeID, 128)
+		require.NoError(t, err)
+		supernodeColumns, err := CustodyColumns(supernodeGroups)
+		require.NoError(t, err)
+
+		// Verify semi-supernode has exactly half the columns of supernode
+		require.Equal(t, 64, len(semiSupernodeColumns))
+		require.Equal(t, 128, len(supernodeColumns))
+		require.Equal(t, len(supernodeColumns)/2, len(semiSupernodeColumns))
+
+		// Verify all semi-supernode columns are a subset of supernode columns
+		for columnIndex := range semiSupernodeColumns {
+			if !supernodeColumns[columnIndex] {
+				t.Fatalf("Semi-supernode column %d not found in supernode columns", columnIndex)
+			}
+		}
+	})
+}
+
+func TestMinimumCustodyGroupCountToReconstruct(t *testing.T) {
+	tests := []struct {
+		name            string
+		numberOfColumns uint64
+		numberOfGroups  uint64
+		expectedResult  uint64
+	}{
+		{
+			name:            "Standard 1:1 ratio (128 columns, 128 groups)",
+			numberOfColumns: 128,
+			numberOfGroups:  128,
+			expectedResult:  64, // Need half of 128 groups
+		},
+		{
+			name:            "2 columns per group (128 columns, 64 groups)",
+			numberOfColumns: 128,
+			numberOfGroups:  64,
+			expectedResult:  32, // Need 64 columns, which is 32 groups (64/2)
+		},
+		{
+			name:            "4 columns per group (128 columns, 32 groups)",
+			numberOfColumns: 128,
+			numberOfGroups:  32,
+			expectedResult:  16, // Need 64 columns, which is 16 groups (64/4)
+		},
+		{
+			name:            "Odd number requiring ceiling division (100 columns, 30 groups)",
+			numberOfColumns: 100,
+			numberOfGroups:  30,
+			expectedResult:  17, // Need 50 columns, 3 columns per group (100/30), ceiling(50/3) = 17
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			params.SetupTestConfigCleanup(t)
+			cfg := params.BeaconConfig()
+			cfg.NumberOfColumns = tt.numberOfColumns
+			cfg.NumberOfCustodyGroups = tt.numberOfGroups
+			params.OverrideBeaconConfig(cfg)
+
+			result, err := MinimumCustodyGroupCountToReconstruct()
+			require.NoError(t, err)
+			require.Equal(t, tt.expectedResult, result)
+		})
+	}
+}
+
+func TestMinimumCustodyGroupCountToReconstruct_ErrorCases(t *testing.T) {
+	t.Run("Returns error when NumberOfColumns is zero", func(t *testing.T) {
+		params.SetupTestConfigCleanup(t)
+		cfg := params.BeaconConfig()
+		cfg.NumberOfColumns = 0
+		cfg.NumberOfCustodyGroups = 128
+		params.OverrideBeaconConfig(cfg)
+
+		_, err := MinimumCustodyGroupCountToReconstruct()
+		require.NotNil(t, err)
+		require.Equal(t, true, err.Error() == "NumberOfColumns cannot be zero")
+	})
+
+	t.Run("Returns error when NumberOfCustodyGroups is zero", func(t *testing.T) {
+		params.SetupTestConfigCleanup(t)
+		cfg := params.BeaconConfig()
+		cfg.NumberOfColumns = 128
+		cfg.NumberOfCustodyGroups = 0
+		params.OverrideBeaconConfig(cfg)
+
+		_, err := MinimumCustodyGroupCountToReconstruct()
+		require.NotNil(t, err)
+		require.Equal(t, true, err.Error() == "NumberOfCustodyGroups cannot be zero")
+	})
+
+	t.Run("Returns error when NumberOfCustodyGroups exceeds NumberOfColumns", func(t *testing.T) {
+		params.SetupTestConfigCleanup(t)
+		cfg := params.BeaconConfig()
+		cfg.NumberOfColumns = 128
+		cfg.NumberOfCustodyGroups = 256
+		params.OverrideBeaconConfig(cfg)
+
+		_, err := MinimumCustodyGroupCountToReconstruct()
+		require.NotNil(t, err)
+		// Just check that we got an error about the configuration
+		require.Equal(t, true, len(err.Error()) > 0)
+	})
+}
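For reference, the 64-column threshold these tests assert falls directly out of `MinimumColumnCountToReconstruct`: with `NumberOfColumns = 128`, (128 + 1) / 2 = 64, so a semi-supernode custodying half of the 128 custody groups holds exactly the minimum column count needed for reconstruction.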
@@ -216,7 +216,7 @@ func rotateRowsToCols(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, nu
 		if len(cells) != len(proofs) {
 			return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
 		}
-		for j := uint64(0); j < numCols; j++ {
+		for j := range numCols {
 			if i == 0 {
 				cellCols[j] = make([][]byte, len(cellsPerBlob))
 				proofCols[j] = make([][]byte, len(cellsPerBlob))
@@ -119,7 +119,7 @@ func TestFuzzverifySigningRoot_10000(_ *testing.T) {
 	var p []byte
 	var s []byte
 	var d []byte
-	for i := 0; i < 10000; i++ {
+	for range 10000 {
 		fuzzer.Fuzz(st)
 		fuzzer.Fuzz(&pubkey)
 		fuzzer.Fuzz(&sig)
@@ -28,8 +28,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
 	block, err := benchmark.PreGenFullBlock()
 	require.NoError(b, err)
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
 		wsb, err := blocks.NewSignedBeaconBlock(block)
 		require.NoError(b, err)
 		_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
@@ -60,8 +59,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
 	_, err = coreState.ExecuteStateTransition(b.Context(), beaconState, wsb)
 	require.NoError(b, err, "Failed to process block, benchmarks will fail")
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
 		wsb, err := blocks.NewSignedBeaconBlock(block)
 		require.NoError(b, err)
 		_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
@@ -83,8 +81,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
 	require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState)))
 	require.NoError(b, beaconState.SetSlot(currentSlot))
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
 		// at run time.
 		_, err := coreState.ProcessEpochPrecompute(b.Context(), beaconState.Copy())
@@ -96,8 +93,7 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
 	beaconState, err := benchmark.PreGenstateFullEpochs()
 	require.NoError(b, err)
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		_, err := beaconState.HashTreeRoot(b.Context())
 		require.NoError(b, err)
 	}
@@ -113,8 +109,7 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
 	_, err = beaconState.HashTreeRoot(ctx)
 	require.NoError(b, err)
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		_, err := beaconState.HashTreeRoot(ctx)
 		require.NoError(b, err)
 	}
@@ -128,7 +123,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
 	b.Run("Proto_Marshal", func(b *testing.B) {
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			_, err := proto.Marshal(natState)
 			require.NoError(b, err)
 		}
@@ -137,7 +132,7 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
 	b.Run("Fast_SSZ_Marshal", func(b *testing.B) {
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			_, err := natState.MarshalSSZ()
 			require.NoError(b, err)
 		}
@@ -157,7 +152,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
 	b.Run("Proto_Unmarshal", func(b *testing.B) {
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			require.NoError(b, proto.Unmarshal(protoObject, &ethpb.BeaconState{}))
 		}
 	})
@@ -165,7 +160,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
 	b.Run("Fast_SSZ_Unmarshal", func(b *testing.B) {
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			sszState := &ethpb.BeaconState{}
 			require.NoError(b, sszState.UnmarshalSSZ(sszObject))
 		}
@@ -174,7 +169,7 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
 
 func clonedStates(beaconState state.BeaconState) []state.BeaconState {
 	clonedStates := make([]state.BeaconState, runAmount)
-	for i := 0; i < runAmount; i++ {
+	for i := range runAmount {
 		clonedStates[i] = beaconState.Copy()
 	}
 	return clonedStates
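These benchmark hunks apply the `bloop` modernizer: Go 1.24's `testing.B.Loop` replaces the classic `b.N` counting loop, and because `Loop` manages the timer itself, the preceding `b.ResetTimer()` call can be dropped. Where the body still needs a distinct index per iteration (e.g. `cleanStates[i]`), the rewrite keeps a manual counter alongside `b.Loop()`. A minimal sketch, assuming Go 1.24+:

```go
package bench

import "testing"

// Old shape: explicit b.N counter, often preceded by b.ResetTimer().
func BenchmarkOld(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = make([]byte, 1024) // work under measurement
	}
}

// New shape: b.Loop() decides when to stop and handles timer resets.
func BenchmarkNew(b *testing.B) {
	for b.Loop() {
		_ = make([]byte, 1024)
	}
}
```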
@@ -108,7 +108,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
 
 	// prepare copies for both states
 	var setups []state.BeaconState
-	for i := uint64(0); i < 300; i++ {
+	for i := range uint64(300) {
 		var st state.BeaconState
 		if i%2 == 0 {
 			st = s1
@@ -95,7 +95,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
 	}
 
 	randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-	for i := 0; i < len(randaoMixes); i++ {
+	for i := range randaoMixes {
 		h := make([]byte, 32)
 		copy(h, eth1Data.BlockHash)
 		randaoMixes[i] = h
@@ -104,17 +104,17 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
 	zeroHash := params.BeaconConfig().ZeroHash[:]
 
 	activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-	for i := 0; i < len(activeIndexRoots); i++ {
+	for i := range activeIndexRoots {
 		activeIndexRoots[i] = zeroHash
 	}
 
 	blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-	for i := 0; i < len(blockRoots); i++ {
+	for i := range blockRoots {
 		blockRoots[i] = zeroHash
 	}
 
 	stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-	for i := 0; i < len(stateRoots); i++ {
+	for i := range stateRoots {
 		stateRoots[i] = zeroHash
 	}
 
@@ -131,7 +131,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
 	}
 	scoresMissing := len(preState.Validators()) - len(scores)
 	if scoresMissing > 0 {
-		for i := 0; i < scoresMissing; i++ {
+		for range scoresMissing {
 			scores = append(scores, 0)
 		}
 	}

@@ -122,7 +122,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
 	}
 
 	randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-	for i := 0; i < len(randaoMixes); i++ {
+	for i := range randaoMixes {
 		h := make([]byte, 32)
 		copy(h, eth1Data.BlockHash)
 		randaoMixes[i] = h
@@ -131,17 +131,17 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
 	zeroHash := params.BeaconConfig().ZeroHash[:]
 
 	activeIndexRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
-	for i := 0; i < len(activeIndexRoots); i++ {
+	for i := range activeIndexRoots {
 		activeIndexRoots[i] = zeroHash
 	}
 
 	blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-	for i := 0; i < len(blockRoots); i++ {
+	for i := range blockRoots {
 		blockRoots[i] = zeroHash
 	}
 
 	stateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
-	for i := 0; i < len(stateRoots); i++ {
+	for i := range stateRoots {
 		stateRoots[i] = zeroHash
 	}
 
@@ -17,7 +17,7 @@ func TestGenesisBeaconState_1000(t *testing.T) {
 	deposits := make([]*ethpb.Deposit, 300000)
 	var genesisTime uint64
 	eth1Data := &ethpb.Eth1Data{}
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(&deposits)
 		fuzzer.Fuzz(&genesisTime)
 		fuzzer.Fuzz(eth1Data)
@@ -40,7 +40,7 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
 	preState, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
 	require.NoError(t, err)
 	eth1Data := &ethpb.Eth1Data{}
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(&genesisTime)
 		fuzzer.Fuzz(eth1Data)
 		fuzzer.Fuzz(preState)
@@ -60,7 +60,7 @@ func TestIsValidGenesisState_100000(_ *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
 	var chainStartDepositCount, currentTime uint64
-	for i := 0; i < 100000; i++ {
+	for range 100000 {
 		fuzzer.Fuzz(&chainStartDepositCount)
 		fuzzer.Fuzz(&currentTime)
 		IsValidGenesisState(chainStartDepositCount, currentTime)

@@ -21,7 +21,7 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) {
 	sb := &ethpb.SignedBeaconBlock{}
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(sb)
 		if sb.Block == nil || sb.Block.Body == nil {
@@ -45,7 +45,7 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
 	sb := &ethpb.SignedBeaconBlock{}
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(sb)
 		if sb.Block == nil || sb.Block.Body == nil {
@@ -68,7 +68,7 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
 	require.NoError(t, err)
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		s, err := ProcessSlot(ctx, state)
 		if err != nil && s != nil {
@@ -86,7 +86,7 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
 	slot := primitives.Slot(0)
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(&slot)
 		s, err := ProcessSlots(ctx, state, slot)
@@ -105,7 +105,7 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
 	bb := &ethpb.BeaconBlock{}
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(bb)
 		if bb.Body == nil {
@@ -128,7 +128,7 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
 	bb := &ethpb.BeaconBlock{}
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 10000; i++ {
+	for range 10000 {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(bb)
 		if bb.Body == nil {
@@ -148,7 +148,7 @@ func TestFuzzCanProcessEpoch_10000(t *testing.T) {
 	require.NoError(t, err)
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 10000; i++ {
+	for range 10000 {
 		fuzzer.Fuzz(state)
 		time.CanProcessEpoch(state)
 	}
@@ -162,7 +162,7 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
 	require.NoError(t, err)
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		s, err := ProcessEpochPrecompute(ctx, state)
 		if err != nil && s != nil {
@@ -180,7 +180,7 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
 	sb := &ethpb.SignedBeaconBlock{}
 	fuzzer := fuzz.NewWithSeed(0)
 	fuzzer.NilChance(0.1)
-	for i := 0; i < 1000; i++ {
+	for range 1000 {
 		fuzzer.Fuzz(state)
 		fuzzer.Fuzz(sb)
 		if sb.Block == nil || sb.Block.Body == nil || sb.Block.Body.Eth1Data == nil {
@@ -754,8 +754,7 @@ func BenchmarkProcessSlots_Capella(b *testing.B) {
 
 	var err error
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
 		if err != nil {
 			b.Fatalf("Failed to process slot %v", err)
@@ -768,8 +767,7 @@ func BenchmarkProcessSlots_Deneb(b *testing.B) {
 
 	var err error
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
 		if err != nil {
 			b.Fatalf("Failed to process slot %v", err)
@@ -782,8 +780,7 @@ func BenchmarkProcessSlots_Electra(b *testing.B) {
 
 	var err error
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
 		if err != nil {
 			b.Fatalf("Failed to process slot %v", err)
@@ -307,7 +307,7 @@ func SlashValidator(
 // ActivatedValidatorIndices determines the indices activated during the given epoch.
 func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
 	activations := make([]primitives.ValidatorIndex, 0)
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		val := validators[i]
 		if val.ActivationEpoch <= epoch && epoch < val.ExitEpoch {
 			activations = append(activations, primitives.ValidatorIndex(i))
@@ -319,7 +319,7 @@ func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Valid
 // SlashedValidatorIndices determines the indices slashed during the given epoch.
 func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
 	slashed := make([]primitives.ValidatorIndex, 0)
-	for i := 0; i < len(validators); i++ {
+	for i := range validators {
 		val := validators[i]
 		maxWithdrawableEpoch := primitives.MaxEpoch(val.WithdrawableEpoch, epoch+params.BeaconConfig().EpochsPerSlashingsVector)
 		if val.WithdrawableEpoch == maxWithdrawableEpoch && val.Slashed {

@@ -172,7 +172,7 @@ func TestSlashValidator_OK(t *testing.T) {
 	validatorCount := 100
 	registry := make([]*ethpb.Validator, 0, validatorCount)
 	balances := make([]uint64, 0, validatorCount)
-	for i := 0; i < validatorCount; i++ {
+	for range validatorCount {
 		registry = append(registry, &ethpb.Validator{
 			ActivationEpoch: 0,
 			ExitEpoch:       params.BeaconConfig().FarFutureEpoch,
@@ -226,7 +226,7 @@ func TestSlashValidator_Electra(t *testing.T) {
 	validatorCount := 100
 	registry := make([]*ethpb.Validator, 0, validatorCount)
 	balances := make([]uint64, 0, validatorCount)
-	for i := 0; i < validatorCount; i++ {
+	for range validatorCount {
 		registry = append(registry, &ethpb.Validator{
 			ActivationEpoch: 0,
 			ExitEpoch:       params.BeaconConfig().FarFutureEpoch,
@@ -109,7 +109,7 @@ func (s *LazilyPersistentStoreBlob) checkOne(ctx context.Context, current primit
 	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
 	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
 	// ignore their response and decrease their peer score.
-	sidecars, err := entry.filter(root, blockCommitments, b.Block().Slot())
+	sidecars, err := entry.filter(root, blockCommitments)
 	if err != nil {
 		return errors.Wrap(err, "incomplete BlobSidecar batch")
 	}

@@ -26,7 +26,7 @@ func Test_commitmentsToCheck(t *testing.T) {
 	windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
 	maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
 	commits := make([][]byte, maxBlobs+1)
-	for i := 0; i < len(commits); i++ {
+	for i := range commits {
 		commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
 	}
 	cases := []struct {

@@ -88,7 +88,7 @@ func (e *blobCacheEntry) stash(sc *blocks.ROBlob) error {
 // commitments were found in the cache and the sidecar slice return value can be used
 // to perform a DA check against the cached sidecars.
 // filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
-func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([]blocks.ROBlob, error) {
+func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte) ([]blocks.ROBlob, error) {
 	count := len(kc)
 	if e.diskSummary.AllAvailable(count) {
 		return nil, nil

@@ -44,7 +44,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
 		entry.setDiskSummary(sum)
 	}
 	expected := make([]blocks.ROBlob, 0, nBlobs)
-	for i := 0; i < len(commits); i++ {
+	for i := range commits {
 		if entry.diskSummary.HasIndex(uint64(i)) {
 			continue
 		}
@@ -113,7 +113,7 @@ func TestFilterDiskSummary(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			entry, commits, expected := c.setup(t)
 			// first (root) argument doesn't matter, it is just for logs
-			got, err := entry.filter([32]byte{}, commits, 100)
+			got, err := entry.filter([32]byte{}, commits)
 			require.NoError(t, err)
 			require.Equal(t, len(expected), len(got))
 		})
@@ -195,7 +195,7 @@ func TestFilter(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			entry, commits, expected := c.setup(t)
 			// first (root) argument doesn't matter, it is just for logs
-			got, err := entry.filter([32]byte{}, commits, 100)
+			got, err := entry.filter([32]byte{}, commits)
 			if c.err != nil {
 				require.ErrorIs(t, err, c.err)
 				return
@@ -112,12 +112,10 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
 		blob := testSidecars[0]
 
 		var wg sync.WaitGroup
-		for i := 0; i < 100; i++ {
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
+		for range 100 {
+			wg.Go(func() {
 				require.NoError(t, b.Save(blob))
-			}()
+			})
 		}
 
 		wg.Wait()
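The `WaitGroup` hunk uses `sync.WaitGroup.Go`, added in Go 1.25, which folds the `Add(1)` / `go func()` / `defer Done()` boilerplate into a single call. A minimal sketch, assuming Go 1.25+:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		// wg.Go increments the counter, runs the function in a new
		// goroutine, and marks it done when the function returns.
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}
	wg.Wait()
}
```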
@@ -32,7 +32,7 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
 	if count > len(s.mask) {
 		return false
 	}
-	for i := 0; i < count; i++ {
+	for i := range count {
 		if !s.mask[i] {
 			return false
 		}
@@ -74,7 +74,7 @@ func (aq AncestryQuery) Span() primitives.Slot {
 // QueryFilter defines a generic interface for type-asserting
 // specific filters to use in querying DB objects.
 type QueryFilter struct {
-	queries  map[FilterType]interface{}
+	queries  map[FilterType]any
 	ancestry AncestryQuery
 }
@@ -82,14 +82,14 @@ type QueryFilter struct {
 // certain Ethereum data types by attribute.
 func NewFilter() *QueryFilter {
 	return &QueryFilter{
-		queries: make(map[FilterType]interface{}),
+		queries: make(map[FilterType]any),
 	}
 }
 
 // Filters returns and underlying map of FilterType to interface{}, giving us
 // a copy of the currently set filters which can then be iterated over and type
 // asserted for use anywhere.
-func (q *QueryFilter) Filters() map[FilterType]interface{} {
+func (q *QueryFilter) Filters() map[FilterType]any {
 	return q.queries
 }
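The `interface{}` to `any` rewrites here are purely cosmetic: `any` has been a type alias for `interface{}` since Go 1.18, so the two are interchangeable with no conversion and no behavior change. A minimal sketch:

```go
package main

import "fmt"

// describe takes any value; spelling the parameter `any` is identical
// to spelling it `interface{}`.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var legacy interface{} = 42
	fmt.Println(describe(legacy)) // "int: 42" (same type, no conversion)
}
```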
@@ -128,9 +128,9 @@ type NoHeadAccessDatabase interface {
 	BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
 
 	// Custody operations.
-	UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
 	UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
 	UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error
+	UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
 
 	// P2P Metadata operations.
 	SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
@@ -27,6 +27,9 @@ go_library(
         "p2p.go",
         "schema.go",
         "state.go",
+        "state_diff.go",
+        "state_diff_cache.go",
+        "state_diff_helpers.go",
         "state_summary.go",
         "state_summary_cache.go",
         "utils.go",
@@ -41,10 +44,12 @@ go_library(
         "//beacon-chain/db/iface:go_default_library",
         "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
+        "//cmd/beacon-chain/flags:go_default_library",
         "//config/features:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",
+        "//consensus-types/hdiff:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/light-client:go_default_library",
         "//consensus-types/primitives:go_default_library",
@@ -53,6 +58,7 @@ go_library(
         "//encoding/ssz/detect:go_default_library",
         "//genesis:go_default_library",
         "//io/file:go_default_library",
+        "//math:go_default_library",
         "//monitoring/progress:go_default_library",
         "//monitoring/tracing:go_default_library",
         "//monitoring/tracing/trace:go_default_library",
@@ -98,6 +104,7 @@ go_test(
         "migration_block_slot_index_test.go",
         "migration_state_validators_test.go",
         "p2p_test.go",
+        "state_diff_test.go",
         "state_summary_test.go",
         "state_test.go",
         "utils_test.go",
@@ -111,6 +118,7 @@ go_test(
         "//beacon-chain/db/iface:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
+        "//cmd/beacon-chain/flags:go_default_library",
         "//config/features:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
@@ -120,6 +128,7 @@ go_test(
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//genesis:go_default_library",
+        "//math:go_default_library",
         "//proto/dbval:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
@@ -133,6 +142,7 @@ go_test(
         "@com_github_golang_snappy//:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
+        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
         "@io_bazel_rules_go//go/tools/bazel:go_default_library",
         "@io_etcd_go_bbolt//:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
@@ -215,7 +215,7 @@ func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interface
 			return err
 		}
 
-		for i := 0; i < len(keys); i++ {
+		for i := range keys {
 			encoded := bkt.Get(keys[i])
 			blk, err := unmarshalBlock(ctx, encoded)
 			if err != nil {
@@ -307,7 +307,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b
 			return err
 		}
 
-		for i := 0; i < len(keys); i++ {
+		for i := range keys {
 			blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
 		}
 		return nil
@@ -1063,7 +1063,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
 func blockRootsBySlotRange(
 	ctx context.Context,
 	bkt *bolt.Bucket,
-	startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
+	startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded any,
 ) ([][]byte, error) {
 	_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
 	defer span.End()

@@ -172,7 +172,7 @@ func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
 
 	// Even with a full cache, saving new blocks should not cause
 	// duplicated blocks in the DB.
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		require.NoError(t, db.SaveBlock(ctx, blk))
 	}
@@ -255,7 +255,7 @@ func TestStore_BlocksHandleZeroCase(t *testing.T) {
 	ctx := t.Context()
 	numBlocks := 10
 	totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
-	for i := 0; i < len(totalBlocks); i++ {
+	for i := range totalBlocks {
 		b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
 		require.NoError(t, err)
 		totalBlocks[i] = b
@@ -279,7 +279,7 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) {
 	numBlocks := 10
 	totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks)
 	// Save blocks from slot 1 onwards.
-	for i := 0; i < len(totalBlocks); i++ {
+	for i := range totalBlocks {
 		b, err := tt.newBlock(primitives.Slot(i+1), bytesutil.PadTo([]byte("parent"), 32))
 		require.NoError(t, err)
 		totalBlocks[i] = b
@@ -927,7 +927,7 @@ func TestStore_Blocks_Retrieve_SlotRange(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			db := setupDB(t)
 			totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-			for i := 0; i < 500; i++ {
+			for i := range 500 {
 				b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
 				require.NoError(t, err)
 				totalBlocks[i] = b
@@ -947,7 +947,7 @@ func TestStore_Blocks_Retrieve_Epoch(t *testing.T) {
 			db := setupDB(t)
 			slots := params.BeaconConfig().SlotsPerEpoch.Mul(7)
 			totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, slots)
-			for i := primitives.Slot(0); i < slots; i++ {
+			for i := range slots {
 				b, err := tt.newBlock(i, bytesutil.PadTo([]byte("parent"), 32))
 				require.NoError(t, err)
 				totalBlocks[i] = b
@@ -971,7 +971,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			db := setupDB(t)
 			totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-			for i := 0; i < 500; i++ {
+			for i := range 500 {
 				b, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
 				require.NoError(t, err)
 				totalBlocks[i] = b
@@ -1140,7 +1140,7 @@ func TestStore_SaveBlocks_HasCachedBlocks(t *testing.T) {
 	ctx := t.Context()
 
 	b := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-	for i := 0; i < 500; i++ {
+	for i := range 500 {
 		blk, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
 		require.NoError(t, err)
 		b[i] = blk
@@ -1164,7 +1164,7 @@ func TestStore_SaveBlocks_HasRootsMatched(t *testing.T) {
 	ctx := t.Context()
 
 	b := make([]interfaces.ReadOnlySignedBeaconBlock, 500)
-	for i := 0; i < 500; i++ {
+	for i := range 500 {
 		blk, err := tt.newBlock(primitives.Slot(i), bytesutil.PadTo([]byte("parent"), 32))
 		require.NoError(t, err)
 		b[i] = blk
|
||||
@@ -146,9 +146,9 @@ func (s *Store) UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailab
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
|
||||
// only if `subscribed` is `true`.
|
||||
// It returns the previous subscription status.
|
||||
// UpdateSubscribedToAllDataSubnets updates whether the node is subscribed to all data subnets (supernode mode).
|
||||
// This is a one-way flag - once set to true, it cannot be reverted to false.
|
||||
// Returns the previous state.
|
||||
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
|
||||
defer span.End()
|
||||
@@ -156,13 +156,11 @@ func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed
|
||||
result := false
|
||||
if !subscribed {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the subscribe all data subnets flag.
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
@@ -181,7 +179,6 @@ func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.