Compare commits

..

9 Commits

Author SHA1 Message Date
satushh 2018b647ea bazel run //:gazelle -- fix 2025-10-24 16:27:57 +01:00
satushh 0352e39f64 remove unwanted comment 2025-10-24 16:23:36 +01:00
satushh 82847da8a7 Merge branch 'develop' into plus-one-bug 2025-10-24 16:19:45 +01:00
satushh accfdd0f7c lint 2025-10-24 16:19:06 +01:00
satushh 2b51e9e350 updated changelog 2025-10-24 16:12:56 +01:00
satushh e05fec0f5f corrected test and extra test for epoch aligned pruning 2025-10-24 16:12:14 +01:00
satushh 1a904dbae3 epoch-aligned pruning to keep complete epochs rather than fractional epochs 2025-10-24 13:01:11 +01:00
satushh 37417e5905 changelog 2025-10-23 11:18:27 +01:00
satushh 7a5f4cf122 correct defaultRetentionEpochs 2025-10-22 19:58:29 +01:00
37 changed files with 320 additions and 210 deletions

View File

@@ -472,8 +472,8 @@ func (s *Service) removeStartupState() {
 func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
 	isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
-	cfg := params.BeaconConfig()
-	custodyRequirement := cfg.CustodyRequirement
+	beaconConfig := params.BeaconConfig()
+	custodyRequirement := beaconConfig.CustodyRequirement
 	// Check if the node was previously subscribed to all data subnets, and if so,
 	// store the new status accordingly.
@@ -493,7 +493,7 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
 	// Compute the custody group count.
 	custodyGroupCount := custodyRequirement
 	if isSubscribedToAllDataSubnets {
-		custodyGroupCount = cfg.NumberOfCustodyGroups
+		custodyGroupCount = beaconConfig.NumberOfCustodyGroups
 	}
 	// Safely compute the fulu fork slot.
@@ -536,11 +536,11 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
 }
 func fuluForkSlot() (primitives.Slot, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
-	fuluForkEpoch := cfg.FuluForkEpoch
-	if fuluForkEpoch == cfg.FarFutureEpoch {
-		return cfg.FarFutureSlot, nil
+	fuluForkEpoch := beaconConfig.FuluForkEpoch
+	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
+		return beaconConfig.FarFutureSlot, nil
 	}
 	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
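The helper above guards the epoch-to-slot conversion so that an unscheduled fork (FuluForkEpoch equal to FarFutureEpoch) maps to FarFutureSlot instead of overflowing inside slots.EpochStart. A minimal standalone sketch of that guard, assuming a mainnet-style 32 slots per epoch and simplifying the slot/epoch types to uint64:

```go
package main

import "fmt"

const (
	slotsPerEpoch  = 32         // mainnet SLOTS_PER_EPOCH; minimal config uses 8
	farFutureEpoch = ^uint64(0) // sentinel meaning "fork not scheduled"
	farFutureSlot  = ^uint64(0)
)

// forkSlot mirrors fuluForkSlot's shape: return a far-future slot for an
// unscheduled fork rather than multiplying the sentinel and overflowing.
func forkSlot(forkEpoch uint64) uint64 {
	if forkEpoch == farFutureEpoch {
		return farFutureSlot
	}
	return forkEpoch * slotsPerEpoch // what slots.EpochStart computes, sans overflow checks
}

func main() {
	fmt.Println(forkSlot(100))            // 3200
	fmt.Println(forkSlot(farFutureEpoch)) // 18446744073709551615
}
```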

View File

@@ -401,7 +401,7 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
 		return 0, errors.New("empty active indices list")
 	}
 	hashFunc := hash.CustomSHA256Hasher()
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	seedBuffer := make([]byte, len(seed)+8)
 	copy(seedBuffer, seed[:])
@@ -426,14 +426,14 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
 			offset := (i % 16) * 2
 			randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
-			if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
+			if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
 				return candidateIndex, nil
 			}
 		} else {
 			binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
 			randomByte := hashFunc(seedBuffer)[i%32]
-			if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
+			if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
 				return candidateIndex, nil
 			}
 		}
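The two branches above apply the same balance-weighted acceptance test with different random-value widths (a 16-bit value post-Electra, a single byte before). A self-contained sketch of the comparison, with illustrative stand-ins for fieldparams.MaxRandomValueElectra and the config's MaxEffectiveBalanceElectra (both constants here are assumptions for illustration, not the library values):

```go
package main

import "fmt"

const (
	maxRandomValue      = uint64(1)<<16 - 1          // stand-in for fieldparams.MaxRandomValueElectra
	maxEffectiveBalance = uint64(2048_000_000_000)   // illustrative Electra cap in Gwei
)

// accepted reports whether a candidate passes the proposer-sampling check.
// Rearranged, a candidate is kept with probability roughly
// effectiveBalance/maxEffectiveBalance over a uniform randomValue.
func accepted(effectiveBalance, randomValue uint64) bool {
	return effectiveBalance*maxRandomValue >= maxEffectiveBalance*randomValue
}

func main() {
	fmt.Println(accepted(maxEffectiveBalance, maxRandomValue))     // true: full balance always passes
	fmt.Println(accepted(maxEffectiveBalance/2, maxRandomValue))   // false: half balance fails the top draw
	fmt.Println(accepted(maxEffectiveBalance/2, maxRandomValue/2)) // true: ...but passes the median draw
}
```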

View File

@@ -89,14 +89,14 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
 // ComputeColumnsForCustodyGroup computes the columns for a given custody group.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
 func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
-	cfg := params.BeaconConfig()
-	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
+	beaconConfig := params.BeaconConfig()
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
 	if custodyGroup >= numberOfCustodyGroups {
 		return nil, ErrCustodyGroupTooLarge
 	}
-	numberOfColumns := cfg.NumberOfColumns
+	numberOfColumns := beaconConfig.NumberOfColumns
 	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
@@ -112,9 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 // ComputeCustodyGroupForColumn computes the custody group for a given column.
 // It is the reciprocal function of ComputeColumnsForCustodyGroup.
 func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
-	cfg := params.BeaconConfig()
-	numberOfColumns := cfg.NumberOfColumns
-	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
+	beaconConfig := params.BeaconConfig()
+	numberOfColumns := beaconConfig.NumberOfColumns
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
 	if columnIndex >= numberOfColumns {
 		return 0, ErrIndexTooLarge
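Per the linked spec section, group g owns every G-th column starting at g (for G custody groups), which is why the reciprocal is a simple modulo. A minimal sketch of the round trip, using small illustrative values rather than the real config constants:

```go
package main

import "fmt"

const (
	numberOfCustodyGroups = 4 // illustrative; mainnet uses 128
	numberOfColumns       = 8 // illustrative; mainnet uses 128
)

// columnsForGroup: group g owns columns g, g+G, g+2G, ... for G groups.
func columnsForGroup(group uint64) []uint64 {
	columnsPerGroup := uint64(numberOfColumns / numberOfCustodyGroups)
	columns := make([]uint64, 0, columnsPerGroup)
	for i := uint64(0); i < columnsPerGroup; i++ {
		columns = append(columns, numberOfCustodyGroups*i+group)
	}
	return columns
}

// groupForColumn is the reciprocal: a column's group is its index modulo G.
func groupForColumn(column uint64) uint64 {
	return column % numberOfCustodyGroups
}

func main() {
	fmt.Println(columnsForGroup(1)) // [1 5]
	for _, c := range columnsForGroup(1) {
		fmt.Println(groupForColumn(c)) // 1, 1: the mapping round-trips
	}
}
```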

View File

@@ -84,10 +84,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
 		totalNodeBalance += validator.EffectiveBalance()
 	}
-	cfg := params.BeaconConfig()
-	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
-	validatorCustodyRequirement := cfg.ValidatorCustodyRequirement
-	balancePerAdditionalCustodyGroup := cfg.BalancePerAdditionalCustodyGroup
+	beaconConfig := params.BeaconConfig()
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
+	validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
+	balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
 	count := totalNodeBalance / balancePerAdditionalCustodyGroup
 	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
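The final line clamps the balance-derived count between the per-validator floor and the total number of groups. A worked sketch of that arithmetic with illustrative constants (the real values come from params.BeaconConfig()):

```go
package main

import "fmt"

func validatorsCustodyRequirement(totalNodeBalanceGwei uint64) uint64 {
	const (
		numberOfCustodyGroups            = 128            // illustrative
		validatorCustodyRequirement      = 8              // illustrative floor
		balancePerAdditionalCustodyGroup = 32_000_000_000 // 32 ETH in Gwei, illustrative
	)
	count := totalNodeBalanceGwei / balancePerAdditionalCustodyGroup
	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups)
}

func main() {
	fmt.Println(validatorsCustodyRequirement(64_000_000_000))     // 2, clamped up to the floor of 8
	fmt.Println(validatorsCustodyRequirement(640_000_000_000))    // 20, within the [8, 128] band
	fmt.Println(validatorsCustodyRequirement(64_000_000_000_000)) // 2000, clamped down to 128
}
```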

View File

@@ -196,7 +196,7 @@ func TestAltairCompatible(t *testing.T) {
 }
 func TestCanUpgradeTo(t *testing.T) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	outerTestCases := []struct {
 		name string
@@ -205,32 +205,32 @@ func TestCanUpgradeTo(t *testing.T) {
 	}{
 		{
 			name: "Altair",
-			forkEpoch: &cfg.AltairForkEpoch,
+			forkEpoch: &beaconConfig.AltairForkEpoch,
 			upgradeFunc: time.CanUpgradeToAltair,
 		},
 		{
 			name: "Bellatrix",
-			forkEpoch: &cfg.BellatrixForkEpoch,
+			forkEpoch: &beaconConfig.BellatrixForkEpoch,
 			upgradeFunc: time.CanUpgradeToBellatrix,
 		},
 		{
 			name: "Capella",
-			forkEpoch: &cfg.CapellaForkEpoch,
+			forkEpoch: &beaconConfig.CapellaForkEpoch,
 			upgradeFunc: time.CanUpgradeToCapella,
 		},
 		{
 			name: "Deneb",
-			forkEpoch: &cfg.DenebForkEpoch,
+			forkEpoch: &beaconConfig.DenebForkEpoch,
 			upgradeFunc: time.CanUpgradeToDeneb,
 		},
 		{
 			name: "Electra",
-			forkEpoch: &cfg.ElectraForkEpoch,
+			forkEpoch: &beaconConfig.ElectraForkEpoch,
 			upgradeFunc: time.CanUpgradeToElectra,
 		},
 		{
 			name: "Fulu",
-			forkEpoch: &cfg.FuluForkEpoch,
+			forkEpoch: &beaconConfig.FuluForkEpoch,
 			upgradeFunc: time.CanUpgradeToFulu,
 		},
 	}
@@ -238,7 +238,7 @@ func TestCanUpgradeTo(t *testing.T) {
 	for _, otc := range outerTestCases {
 		params.SetupTestConfigCleanup(t)
 		*otc.forkEpoch = 5
-		params.OverrideBeaconConfig(cfg)
+		params.OverrideBeaconConfig(beaconConfig)
 		innerTestCases := []struct {
 			name string
View File

@@ -31,6 +31,7 @@ go_test(
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
+        "//time/slots:go_default_library",
         "//time/slots/testing:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@com_github_sirupsen_logrus//hooks/test:go_default_library",

View File

@@ -36,7 +36,7 @@ type ServiceOption func(*Service)
 // The retention period is specified in epochs, and must be >= MIN_EPOCHS_FOR_BLOCK_REQUESTS.
 func WithRetentionPeriod(retentionEpochs primitives.Epoch) ServiceOption {
 	return func(s *Service) {
-		defaultRetentionEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + 1
+		defaultRetentionEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 		if retentionEpochs < defaultRetentionEpochs {
 			log.WithField("userEpochs", retentionEpochs).
 				WithField("minRequired", defaultRetentionEpochs).
@@ -75,7 +75,7 @@ func New(ctx context.Context, db iface.Database, genesisTime time.Time, initSync
 	p := &Service{
 		ctx: ctx,
 		db:  db,
-		ps:  pruneStartSlotFunc(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + 1), // Default retention epochs is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 from the current slot.
+		ps:  pruneStartSlotFunc(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)),
 		done: make(chan struct{}),
 		slotTicker: slots.NewSlotTicker(slots.UnsafeStartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot),
 		initSyncWaiter: initSyncWaiter,
@@ -239,15 +239,38 @@ func (p *Service) pruneBatches(pruneUpto primitives.Slot) (int, error) {
 }
 // pruneStartSlotFunc returns the function to determine the start slot to start pruning.
+// The pruning calculation is epoch-aligned,
+// ensuring that earliestAvailableSlot is always at an epoch boundary.
+// So that we prune epoch-wise.
+// e.g. if retentionEpochs is 3 i.e. we should keep at least 3 epochs from current slot,
+//
+// current slot is 325 (=> current epoch is 10),
+// then we should keep epoch 7 onwards (inclusive of epoch 7).
+// So we can prune up to the last slot of 6th epoch i.e. 32 x 7 - 1 = 223
+// Earliest available slot would be 224 in that case.
 func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
 	return func(current primitives.Slot) primitives.Slot {
 		if retentionEpochs > slots.MaxSafeEpoch() {
 			retentionEpochs = slots.MaxSafeEpoch()
 		}
-		offset := slots.UnsafeEpochStart(retentionEpochs)
-		if offset >= current {
+		// Calculate epoch-aligned minimum required slot
+		currentEpoch := slots.ToEpoch(current)
+		var minRequiredEpoch primitives.Epoch
+		if currentEpoch > retentionEpochs {
+			minRequiredEpoch = currentEpoch - retentionEpochs
+		} else {
+			minRequiredEpoch = 0
+		}
+		// Get the start slot of the minimum required epoch
+		minRequiredSlot, err := slots.EpochStart(minRequiredEpoch)
+		if err != nil || minRequiredSlot == 0 {
 			return 0
 		}
-		return current - offset
+		// Prune up to (but not including) the minimum required slot
+		// This ensures earliestAvailableSlot (pruneUpto + 1) is at the epoch boundary
+		return minRequiredSlot - 1
 	}
 }
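To make the comment's example concrete, a runnable sketch of the new calculation under the mainnet value of 32 slots per epoch (the real code goes through the slots package and its overflow guards; types are simplified to uint64 here):

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet; minimal config uses 8

// pruneUpto reproduces the epoch-aligned calculation: keep retentionEpochs
// complete epochs ending at the current epoch, prune everything before them.
func pruneUpto(current, retentionEpochs uint64) uint64 {
	currentEpoch := current / slotsPerEpoch
	var minRequiredEpoch uint64
	if currentEpoch > retentionEpochs {
		minRequiredEpoch = currentEpoch - retentionEpochs
	}
	minRequiredSlot := minRequiredEpoch * slotsPerEpoch
	if minRequiredSlot == 0 {
		return 0 // nothing to prune yet
	}
	return minRequiredSlot - 1
}

func main() {
	// The comment's example: slot 325 is in epoch 10; with retention 3 we keep
	// epochs 7..10, so we prune up to slot 7*32-1 = 223 and the earliest
	// available slot becomes 224, an epoch boundary.
	fmt.Println(pruneUpto(325, 3)) // 223
}
```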

View File

@@ -11,6 +11,7 @@ import (
 	eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v6/testing/util"
+	"github.com/OffchainLabs/prysm/v6/time/slots"
 	slottest "github.com/OffchainLabs/prysm/v6/time/slots/testing"
 	"github.com/sirupsen/logrus"
@@ -247,12 +248,23 @@ func TestWithRetentionPeriod_EnforcesMinimum(t *testing.T) {
 	ctx := t.Context()
 	beaconDB := dbtest.SetupDB(t)
-	// Get the minimum required epochs (272 + 1 = 273 for minimal)
-	minRequiredEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests + 1)
+	// Get the minimum required epochs (272 for minimal)
+	minRequiredEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 	// Use a slot that's guaranteed to be after the minimum retention period
 	currentSlot := primitives.Slot(minRequiredEpochs+100) * (params.BeaconConfig().SlotsPerEpoch)
+	// Calculate epoch-aligned expected prune slot
+	// For epoch-aligned pruning: pruneUpto = epochStart(currentEpoch - retention) - 1
+	currentEpoch := slots.ToEpoch(currentSlot)
+	// Helper function to calculate expected prune slot for a given retention
+	calcExpectedPruneSlot := func(retention primitives.Epoch) primitives.Slot {
+		minEpoch := currentEpoch - retention
+		minSlot, _ := slots.EpochStart(minEpoch)
+		return minSlot - 1
+	}
 	tests := []struct {
 		name                string
 		userRetentionEpochs primitives.Epoch
@@ -262,19 +274,19 @@ func TestWithRetentionPeriod_EnforcesMinimum(t *testing.T) {
 		{
 			name:                "User value below minimum - should use minimum",
 			userRetentionEpochs: 2, // Way below minimum
-			expectedPruneSlot:   currentSlot - primitives.Slot(minRequiredEpochs)*params.BeaconConfig().SlotsPerEpoch,
+			expectedPruneSlot:   calcExpectedPruneSlot(minRequiredEpochs),
 			description:         "Should use minimum when user value is too low",
 		},
 		{
 			name:                "User value at minimum",
 			userRetentionEpochs: minRequiredEpochs,
-			expectedPruneSlot:   currentSlot - primitives.Slot(minRequiredEpochs)*params.BeaconConfig().SlotsPerEpoch,
+			expectedPruneSlot:   calcExpectedPruneSlot(minRequiredEpochs),
 			description:         "Should use user value when at minimum",
 		},
 		{
 			name:                "User value above minimum",
 			userRetentionEpochs: minRequiredEpochs + 10,
-			expectedPruneSlot:   currentSlot - primitives.Slot(minRequiredEpochs+10)*params.BeaconConfig().SlotsPerEpoch,
+			expectedPruneSlot:   calcExpectedPruneSlot(minRequiredEpochs + 10),
 			description:         "Should use user value when above minimum",
 		},
 	}
@@ -311,6 +323,133 @@ func TestWithRetentionPeriod_EnforcesMinimum(t *testing.T) {
 	}
 }
+func TestWithRetentionPeriod_AcceptsSpecMinimum(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	config := params.MinimalSpecConfig()
+	params.OverrideBeaconConfig(config)
+	ctx := t.Context()
+	beaconDB := dbtest.SetupDB(t)
+	hook := logTest.NewGlobal()
+	logrus.SetLevel(logrus.WarnLevel)
+	// The spec minimum - this SHOULD be accepted without warning
+	specMinimum := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
+	// Use a slot that's guaranteed to be after the minimum retention period
+	currentSlot := primitives.Slot(specMinimum+100) * (params.BeaconConfig().SlotsPerEpoch)
+	mockCustody := &mockCustodyUpdater{}
+	p, err := New(
+		ctx,
+		beaconDB,
+		time.Now(),
+		nil,
+		nil,
+		mockCustody,
+		WithRetentionPeriod(specMinimum),
+	)
+	require.NoError(t, err)
+	// Test the pruning calculation
+	pruneUptoSlot := p.ps(currentSlot)
+	// The expected prune slot should use epoch-aligned calculation
+	// pruneUpto = epochStart(currentEpoch - retention) - 1
+	currentEpoch := slots.ToEpoch(currentSlot)
+	minRequiredEpoch := currentEpoch - specMinimum
+	minRequiredSlot, err := slots.EpochStart(minRequiredEpoch)
+	require.NoError(t, err)
+	expectedPruneSlot := minRequiredSlot - 1
+	assert.Equal(t, expectedPruneSlot, pruneUptoSlot,
+		"Pruner should accept and use MIN_EPOCHS_FOR_BLOCK_REQUESTS without adding 1")
+	for _, entry := range hook.AllEntries() {
+		if entry.Level == logrus.WarnLevel {
+			t.Errorf("Unexpected warning when using spec minimum: %s", entry.Message)
+		}
+	}
+}
+func TestPruneStartSlotFunc_EpochAlignment(t *testing.T) {
+	// This test verifies that the pruning calculation is epoch-aligned.
+	params.SetupTestConfigCleanup(t)
+	config := params.MinimalSpecConfig()
+	params.OverrideBeaconConfig(config)
+	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch // 8 for minimal config
+	retentionEpochs := primitives.Epoch(3)
+	tests := []struct {
+		name                    string
+		currentSlot             primitives.Slot
+		expectedEpochAlignment  bool
+		expectedMinRequiredSlot primitives.Slot
+		description             string
+	}{
+		{
+			name:                    "Pruning at epoch boundary",
+			currentSlot:             primitives.Slot(4 * slotsPerEpoch), // Slot 32 (epoch 4, slot 0 of epoch)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(1 * slotsPerEpoch), // Epoch 1 start = slot 8
+			description:             "When pruning at epoch boundary, earliestAvailableSlot should be at epoch boundary",
+		},
+		{
+			name:                    "Pruning at middle of epoch",
+			currentSlot:             primitives.Slot(4*slotsPerEpoch + 4), // Slot 36 (epoch 4, slot 4 of epoch)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(1 * slotsPerEpoch), // Epoch 1 start = slot 8
+			description:             "When pruning mid-epoch, earliestAvailableSlot must still be at epoch boundary",
+		},
+		{
+			name:                    "Pruning at end of epoch",
+			currentSlot:             primitives.Slot(5*slotsPerEpoch - 1), // Slot 39 (epoch 4, last slot)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(1 * slotsPerEpoch), // Epoch 1 start = slot 8
+			description:             "When pruning at epoch end, earliestAvailableSlot must be at epoch boundary",
+		},
+		{
+			name:                    "Pruning at various epoch positions",
+			currentSlot:             primitives.Slot(8*slotsPerEpoch + 5), // Slot 69 (epoch 8, slot 5 of epoch)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(5 * slotsPerEpoch), // Epoch 5 start = slot 40
+			description:             "EarliestAvailableSlot should always align to epoch boundary",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Create the prune start slot function
+			ps := pruneStartSlotFunc(retentionEpochs)
+			// Calculate pruneUpto slot
+			pruneUpto := ps(tt.currentSlot)
+			// EarliestAvailableSlot is pruneUpto + 1
+			earliestAvailableSlot := pruneUpto + 1
+			// Verify epoch alignment: earliestAvailableSlot should be at an epoch boundary
+			if tt.expectedEpochAlignment {
+				// Check if earliestAvailableSlot is at the start of an epoch
+				epoch := slots.ToEpoch(earliestAvailableSlot)
+				epochStartSlot, err := slots.EpochStart(epoch)
+				require.NoError(t, err)
+				assert.Equal(t, epochStartSlot, earliestAvailableSlot,
+					"%s: earliestAvailableSlot (%d) should be at epoch boundary (slot %d of epoch %d)",
+					tt.description, earliestAvailableSlot, epochStartSlot, epoch)
+			}
+			// Verify it matches the expected minimum required slot for custody validation
+			assert.Equal(t, tt.expectedMinRequiredSlot, earliestAvailableSlot,
+				"%s: earliestAvailableSlot should match custody minimum required slot",
+				tt.description)
+		})
+	}
+}
 func TestPruner_UpdateEarliestSlotError(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
 	config := params.BeaconConfig()

View File

@@ -10,13 +10,11 @@ import (
 // pruneExpired prunes attestations pool on every slot interval.
 func (s *Service) pruneExpired() {
-	secondsPerSlot := params.BeaconConfig().SecondsPerSlot
-	offset := time.Duration(secondsPerSlot-1) * time.Second
-	slotTicker := slots.NewSlotTickerWithOffset(s.genesisTime, offset, secondsPerSlot)
-	defer slotTicker.Done()
+	ticker := time.NewTicker(s.cfg.pruneInterval)
+	defer ticker.Stop()
 	for {
 		select {
-		case <-slotTicker.C():
+		case <-ticker.C:
 			s.pruneExpiredAtts()
 			s.updateMetrics()
 		case <-s.ctx.Done():

View File

@@ -17,9 +17,7 @@ import (
 )
 func TestPruneExpired_Ticker(t *testing.T) {
-	// Need timeout longer than the offset (secondsPerSlot - 1) + some buffer
-	timeout := time.Duration(params.BeaconConfig().SecondsPerSlot+5) * time.Second
-	ctx, cancel := context.WithTimeout(t.Context(), timeout)
+	ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
 	defer cancel()
 	s, err := NewService(ctx, &Config{

View File

@@ -7,7 +7,6 @@ import (
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
-	"github.com/sirupsen/logrus"
 )
 // This is the default queue size used if we have specified an invalid one.
@@ -64,17 +63,12 @@ func (cfg *Config) connManagerLowHigh() (int, int) {
 	return low, high
 }
-// validateConfig validates whether the provided config has valid values and sets
-// the invalid ones to default.
-func validateConfig(cfg *Config) {
-	if cfg.QueueSize > 0 {
-		return
+// validateConfig validates whether the values provided are accurate and will set
+// the appropriate values for those that are invalid.
+func validateConfig(cfg *Config) *Config {
+	if cfg.QueueSize == 0 {
+		log.Warnf("Invalid pubsub queue size of %d initialized, setting the quese size as %d instead", cfg.QueueSize, defaultPubsubQueueSize)
+		cfg.QueueSize = defaultPubsubQueueSize
 	}
-	log.WithFields(logrus.Fields{
-		"queueSize": cfg.QueueSize,
-		"default":   defaultPubsubQueueSize,
-	}).Warning("Invalid pubsub queue size, setting the queue size to the default value")
-	cfg.QueueSize = defaultPubsubQueueSize
+	return cfg
 }

View File

@@ -259,11 +259,11 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
 }
 func fuluForkSlot() (primitives.Slot, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
-	fuluForkEpoch := cfg.FuluForkEpoch
-	if fuluForkEpoch == cfg.FarFutureEpoch {
-		return cfg.FarFutureSlot, nil
+	fuluForkEpoch := beaconConfig.FuluForkEpoch
+	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
+		return beaconConfig.FarFutureSlot, nil
 	}
 	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)

View File

@@ -3,7 +3,6 @@ package p2p
 import (
 	"strings"
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/peerstore"
 	"github.com/prometheus/client_golang/prometheus"
@@ -27,25 +26,12 @@ var (
 		Help: "The number of peers in a given state.",
 	},
 		[]string{"state"})
-	p2pMaxPeers = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "p2p_max_peers",
-		Help: "The target maximum number of peers.",
-	})
-	p2pPeerCountDirectionType = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "p2p_peer_count_direction_type",
-		Help: "The number of peers in a given direction and type.",
-	},
-		[]string{"direction", "type"})
 	connectedPeersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
 		Name: "connected_libp2p_peers",
 		Help: "Tracks the total number of connected libp2p peers by agent string",
 	},
 		[]string{"agent"},
 	)
-	minimumPeersPerSubnet = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "p2p_minimum_peers_per_subnet",
-		Help: "The minimum number of peers to connect to per subnet",
-	})
 	avgScoreConnectedClients = promauto.NewGaugeVec(prometheus.GaugeOpts{
 		Name: "connected_libp2p_peers_average_scores",
 		Help: "Tracks the overall p2p scores of connected libp2p peers by agent string",
@@ -188,26 +174,18 @@ var (
 )
 func (s *Service) updateMetrics() {
-	store := s.Host().Peerstore()
 	connectedPeers := s.peers.Connected()
 	p2pPeerCount.WithLabelValues("Connected").Set(float64(len(connectedPeers)))
 	p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected())))
 	p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting())))
 	p2pPeerCount.WithLabelValues("Disconnecting").Set(float64(len(s.peers.Disconnecting())))
 	p2pPeerCount.WithLabelValues("Bad").Set(float64(len(s.peers.Bad())))
-	upperTCP := strings.ToUpper(string(peers.TCP))
-	upperQUIC := strings.ToUpper(string(peers.QUIC))
-	p2pPeerCountDirectionType.WithLabelValues("inbound", upperTCP).Set(float64(len(s.peers.InboundConnectedWithProtocol(peers.TCP))))
-	p2pPeerCountDirectionType.WithLabelValues("inbound", upperQUIC).Set(float64(len(s.peers.InboundConnectedWithProtocol(peers.QUIC))))
-	p2pPeerCountDirectionType.WithLabelValues("outbound", upperTCP).Set(float64(len(s.peers.OutboundConnectedWithProtocol(peers.TCP))))
-	p2pPeerCountDirectionType.WithLabelValues("outbound", upperQUIC).Set(float64(len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))))
-	connectedPeersCountByClient := make(map[string]float64)
+	store := s.Host().Peerstore()
+	numConnectedPeersByClient := make(map[string]float64)
 	peerScoresByClient := make(map[string][]float64)
-	for _, p := range connectedPeers {
+	for i := 0; i < len(connectedPeers); i++ {
+		p := connectedPeers[i]
 		pid, err := peer.Decode(p.String())
 		if err != nil {
 			log.WithError(err).Debug("Could not decode peer string")
@@ -215,18 +193,16 @@ func (s *Service) updateMetrics() {
 		}
 		foundName := agentFromPid(pid, store)
-		connectedPeersCountByClient[foundName] += 1
+		numConnectedPeersByClient[foundName] += 1
 		// Get peer scoring data.
 		overallScore := s.peers.Scorers().Score(pid)
 		peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
 	}
 	connectedPeersCount.Reset() // Clear out previous results.
-	for agent, total := range connectedPeersCountByClient {
+	for agent, total := range numConnectedPeersByClient {
 		connectedPeersCount.WithLabelValues(agent).Set(total)
 	}
 	avgScoreConnectedClients.Reset() // Clear out previous results.
 	for agent, scoringData := range peerScoresByClient {
 		avgScore := average(scoringData)
View File

@@ -81,31 +81,29 @@
 type InternetProtocol string
 const (
-	TCP  = InternetProtocol("tcp")
-	QUIC = InternetProtocol("quic")
+	TCP  = "tcp"
+	QUIC = "quic"
 )
-type (
-	// Status is the structure holding the peer status information.
-	Status struct {
-		ctx                   context.Context
-		scorers               *scorers.Service
-		store                 *peerdata.Store
-		ipTracker             map[string]uint64
-		rand                  *rand.Rand
-		ipColocationWhitelist []*net.IPNet
-	}
+// Status is the structure holding the peer status information.
+type Status struct {
+	ctx                   context.Context
+	scorers               *scorers.Service
+	store                 *peerdata.Store
+	ipTracker             map[string]uint64
+	rand                  *rand.Rand
+	ipColocationWhitelist []*net.IPNet
+}
-	// StatusConfig represents peer status service params.
-	StatusConfig struct {
-		// PeerLimit specifies maximum amount of concurrent peers that are expected to be connect to the node.
-		PeerLimit int
-		// ScorerParams holds peer scorer configuration params.
-		ScorerParams *scorers.Config
-		// IPColocationWhitelist contains CIDR ranges that are exempt from IP colocation limits.
-		IPColocationWhitelist []*net.IPNet
-	}
-)
+// StatusConfig represents peer status service params.
+type StatusConfig struct {
+	// PeerLimit specifies maximum amount of concurrent peers that are expected to be connect to the node.
+	PeerLimit int
+	// ScorerParams holds peer scorer configuration params.
+	ScorerParams *scorers.Config
+	// IPColocationWhitelist contains CIDR ranges that are exempt from IP colocation limits.
+	IPColocationWhitelist []*net.IPNet
+}
 // NewStatus creates a new status entity.
 func NewStatus(ctx context.Context, config *StatusConfig) *Status {

View File

@@ -345,17 +345,17 @@ func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
 		return "", errors.Errorf("%s: %s", invalidRPCMessageType, msg)
 	}
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	// Check if the message is to be updated in fulu.
-	if epoch >= cfg.FuluForkEpoch {
+	if epoch >= beaconConfig.FuluForkEpoch {
 		if version, ok := fuluMapping[msg]; ok {
 			return protocolPrefix + msg + version, nil
 		}
 	}
 	// Check if the message is to be updated in altair.
-	if epoch >= cfg.AltairForkEpoch {
+	if epoch >= beaconConfig.AltairForkEpoch {
 		if version, ok := altairMapping[msg]; ok {
 			return protocolPrefix + msg + version, nil
 		}

View File

@@ -14,7 +14,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
-	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v6/config/features"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -107,16 +106,12 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
-	validateConfig(cfg)
+	cfg = validateConfig(cfg)
 	privKey, err := privKey(cfg)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to generate p2p private key")
 	}
-	p2pMaxPeers.Set(float64(cfg.MaxPeers))
-	minimumPeersPerSubnet.Set(float64(flags.Get().MinimumPeersPerSubnet))
 	metaData, err := metaDataFromDB(ctx, cfg.DB)
 	if err != nil {
 		log.WithError(err).Error("Failed to create peer metadata")

View File

@@ -514,18 +514,18 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
 //
 // return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
 func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	if flags.Get().SubscribeToAllSubnets {
-		subnets := make([]uint64, 0, cfg.AttestationSubnetCount)
-		for i := range cfg.AttestationSubnetCount {
+		subnets := make([]uint64, 0, beaconConfig.AttestationSubnetCount)
+		for i := range beaconConfig.AttestationSubnetCount {
 			subnets = append(subnets, i)
 		}
 		return subnets, nil
 	}
-	subnets := make([]uint64, 0, cfg.SubnetsPerNode)
-	for i := range cfg.SubnetsPerNode {
+	subnets := make([]uint64, 0, beaconConfig.SubnetsPerNode)
+	for i := range beaconConfig.SubnetsPerNode {
 		sub, err := computeSubscribedSubnet(nodeID, epoch, i)
 		if err != nil {
 			return nil, errors.Wrap(err, "compute subscribed subnet")
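The inner computeSubscribedSubnet (not shown in this diff) shuffles a prefix of the node ID with an epoch-derived seed, per the spec function quoted in the comment; consecutive indices then land on consecutive subnets, which the tests in the next file rely on. A deliberately simplified sketch of the enumeration wrapper, with a hypothetical opaque basis value standing in for the shuffled prefix:

```go
package main

import "fmt"

const (
	attestationSubnetCount = 64 // mainnet ATTESTATION_SUBNET_COUNT
	subnetsPerNode         = 2  // mainnet SUBNETS_PER_NODE
)

// computeSubscribedSubnet is a stand-in for the spec function: the real one
// derives basis from the node ID and epoch; here it is passed in directly.
// Consecutive indices yield consecutive subnets, modulo the subnet count.
func computeSubscribedSubnet(basis, index uint64) uint64 {
	return (basis + index) % attestationSubnetCount
}

func subscribedSubnets(basis uint64) []uint64 {
	subnets := make([]uint64, 0, subnetsPerNode)
	for i := uint64(0); i < subnetsPerNode; i++ {
		subnets = append(subnets, computeSubscribedSubnet(basis, i))
	}
	return subnets
}

func main() {
	fmt.Println(subscribedSubnets(41)) // [41 42]
	fmt.Println(subscribedSubnets(63)) // [63 0]: wraps around the subnet count
}
```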

View File

@@ -524,12 +524,12 @@ func TestSubnetComputation(t *testing.T) {
 	require.NoError(t, err)
 	localNode := enode.NewLocalNode(db, convertedKey)
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	t.Run("standard", func(t *testing.T) {
 		retrievedSubnets, err := computeSubscribedSubnets(localNode.ID(), 1000)
 		require.NoError(t, err)
-		require.Equal(t, cfg.SubnetsPerNode, uint64(len(retrievedSubnets)))
+		require.Equal(t, beaconConfig.SubnetsPerNode, uint64(len(retrievedSubnets)))
 		require.Equal(t, retrievedSubnets[0]+1, retrievedSubnets[1])
 	})
@@ -541,8 +541,8 @@ func TestSubnetComputation(t *testing.T) {
 		retrievedSubnets, err := computeSubscribedSubnets(localNode.ID(), 1000)
 		require.NoError(t, err)
-		require.Equal(t, cfg.AttestationSubnetCount, uint64(len(retrievedSubnets)))
-		for i := range cfg.AttestationSubnetCount {
+		require.Equal(t, beaconConfig.AttestationSubnetCount, uint64(len(retrievedSubnets)))
+		for i := range beaconConfig.AttestationSubnetCount {
 			require.Equal(t, i, retrievedSubnets[i])
 		}
 	})

View File

@@ -90,10 +90,10 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
 // custodyGroupCount computes the custody group count based on the custody requirement,
 // the validators custody requirement, and whether the node is subscribed to all data subnets.
 func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	if flags.Get().SubscribeAllDataSubnets {
-		return cfg.NumberOfCustodyGroups, nil
+		return beaconConfig.NumberOfCustodyGroups, nil
 	}
 	validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
@@ -101,7 +101,7 @@ func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
 		return 0, errors.Wrap(err, "validators custody requirement")
 	}
-	return max(cfg.CustodyRequirement, validatorsCustodyRequirement), nil
+	return max(beaconConfig.CustodyRequirement, validatorsCustodyRequirement), nil
 }
 // validatorsCustodyRequirements computes the custody requirements based on the
// validatorsCustodyRequirements computes the custody requirements based on the

View File

@@ -116,11 +116,11 @@ func withSubscribeAllDataSubnets(t *testing.T, fn func()) {
 func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.NumberOfCustodyGroups = 128
-	cfg.CustodyRequirement = 4
-	cfg.SamplesPerSlot = 8
-	params.OverrideBeaconConfig(cfg)
+	beaconConfig := params.BeaconConfig()
+	beaconConfig.NumberOfCustodyGroups = 128
+	beaconConfig.CustodyRequirement = 4
+	beaconConfig.SamplesPerSlot = 8
+	params.OverrideBeaconConfig(beaconConfig)
 	t.Run("Skip update when actual custody count >= target", func(t *testing.T) {
 		setup := setupCustodyTest(t, false)
@@ -159,7 +159,7 @@ func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
 			require.NoError(t, err)
 			const expectedSlot = primitives.Slot(100)
-			setup.assertCustodyInfo(t, expectedSlot, cfg.NumberOfCustodyGroups)
+			setup.assertCustodyInfo(t, expectedSlot, beaconConfig.NumberOfCustodyGroups)
 		})
 	})
 }

View File

@@ -1133,14 +1133,9 @@ func randomPeer(
 			"delay": waitPeriod,
 		}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
-		timer := time.NewTimer(waitPeriod)
 		select {
-		case <-timer.C:
-			// Timer expired, retry the loop
+		case <-time.After(waitPeriod):
 		case <-ctx.Done():
-			// Context cancelled - stop timer to prevent leak
-			timer.Stop()
 			return "", ctx.Err()
 		}
 	}

View File

@@ -1366,16 +1366,16 @@ func TestFetchSidecars(t *testing.T) {
 	})
 	t.Run("Nominal", func(t *testing.T) {
-		cfg := params.BeaconConfig()
-		numberOfColumns := cfg.NumberOfColumns
-		samplesPerSlot := cfg.SamplesPerSlot
+		beaconConfig := params.BeaconConfig()
+		numberOfColumns := beaconConfig.NumberOfColumns
+		samplesPerSlot := beaconConfig.SamplesPerSlot
 		// Define "now" to be one epoch after genesis time + retention period.
 		genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
-		secondsPerSlot := cfg.SecondsPerSlot
-		slotsPerEpoch := cfg.SlotsPerEpoch
+		secondsPerSlot := beaconConfig.SecondsPerSlot
+		slotsPerEpoch := beaconConfig.SlotsPerEpoch
 		secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
-		retentionEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
+		retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
 		nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
 		now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)

View File

@@ -530,12 +530,12 @@ func TestOriginOutsideRetention(t *testing.T) {
 func TestFetchOriginSidecars(t *testing.T) {
 	ctx := t.Context()
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
-	secondsPerSlot := cfg.SecondsPerSlot
-	slotsPerEpoch := cfg.SlotsPerEpoch
+	secondsPerSlot := beaconConfig.SecondsPerSlot
+	slotsPerEpoch := beaconConfig.SlotsPerEpoch
 	secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
-	retentionEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
+	retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
 	genesisValidatorRoot := [fieldparams.RootLength]byte{}

View File

@@ -286,7 +286,6 @@ func (s *Service) updateMetrics() {
 		topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(formattedTopic))))
 	}
-	subscribedTopicPeerCount.Reset()
 	for _, topic := range s.cfg.p2p.PubSub().GetTopics() {
 		subscribedTopicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(topic))))
 	}

View File

@@ -113,19 +113,19 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
 }
 func validateBlobByRootRequest(blobIdents types.BlobSidecarsByRootReq, slot primitives.Slot) error {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	epoch := slots.ToEpoch(slot)
 	blobIdentCount := uint64(len(blobIdents))
-	if epoch >= cfg.ElectraForkEpoch {
-		if blobIdentCount > cfg.MaxRequestBlobSidecarsElectra {
+	if epoch >= beaconConfig.ElectraForkEpoch {
+		if blobIdentCount > beaconConfig.MaxRequestBlobSidecarsElectra {
 			return types.ErrMaxBlobReqExceeded
 		}
 		return nil
 	}
-	if blobIdentCount > cfg.MaxRequestBlobSidecars {
+	if blobIdentCount > beaconConfig.MaxRequestBlobSidecars {
 		return types.ErrMaxBlobReqExceeded
 	}
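A condensed sketch of the fork-dependent cap this validator enforces; the numeric limits here are illustrative assumptions, while the real ones come from the config's MaxRequestBlobSidecars and MaxRequestBlobSidecarsElectra:

```go
package main

import (
	"errors"
	"fmt"
)

var errMaxBlobReqExceeded = errors.New("requested more than the maximum number of blob sidecars")

// validateBlobRequestCount enforces a per-request cap that changes at the
// Electra fork: post-Electra requests get a higher limit.
func validateBlobRequestCount(count, epoch, electraForkEpoch uint64) error {
	const (
		maxRequestBlobSidecars        = 128 // illustrative pre-Electra cap
		maxRequestBlobSidecarsElectra = 256 // illustrative post-Electra cap
	)
	limit := uint64(maxRequestBlobSidecars)
	if epoch >= electraForkEpoch {
		limit = maxRequestBlobSidecarsElectra
	}
	if count > limit {
		return errMaxBlobReqExceeded
	}
	return nil
}

func main() {
	fmt.Println(validateBlobRequestCount(200, 5, 10))  // error: over the pre-Electra cap
	fmt.Println(validateBlobRequestCount(200, 10, 10)) // nil: within the Electra cap
}
```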

View File

@@ -38,8 +38,8 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
 	defer cancel()
 	SetRPCStreamDeadlines(stream)
-	cfg := params.BeaconConfig()
-	maxRequestDataColumnSidecars := cfg.MaxRequestDataColumnSidecars
+	beaconConfig := params.BeaconConfig()
+	maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars
 	remotePeer := stream.Conn().RemotePeer()
 	log := log.WithFields(logrus.Fields{
@@ -102,7 +102,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
 		// Once the quota is reached, we're done serving the request.
 		if maxRequestDataColumnSidecars == 0 {
-			log.WithField("initialQuota", cfg.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
+			log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
 			break
 		}
 	}

View File

@@ -31,9 +31,9 @@ import (
 func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(cfg)
+	beaconConfig := params.BeaconConfig()
+	beaconConfig.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(beaconConfig)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctx := context.Background()
 	t.Run("wrong message type", func(t *testing.T) {

View File

@@ -163,9 +163,9 @@ func dataColumnsRPCMinValidSlot(currentSlot primitives.Slot) (primitives.Slot, e
 		return primitives.Slot(math.MaxUint64), nil
 	}
-	cfg := params.BeaconConfig()
-	minReqEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
-	minStartEpoch := cfg.FuluForkEpoch
+	beaconConfig := params.BeaconConfig()
+	minReqEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
+	minStartEpoch := beaconConfig.FuluForkEpoch
 	currEpoch := slots.ToEpoch(currentSlot)
 	if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStartEpoch {

View File

@@ -28,9 +28,9 @@ import (
 func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(cfg)
+	beaconConfig := params.BeaconConfig()
+	beaconConfig.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(beaconConfig)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
 	require.NoError(t, err)
@@ -43,9 +43,9 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
 	t.Run("invalid request", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
-		cfg := params.BeaconConfig()
-		cfg.MaxRequestDataColumnSidecars = 1
-		params.OverrideBeaconConfig(cfg)
+		beaconConfig := params.BeaconConfig()
+		beaconConfig.MaxRequestDataColumnSidecars = 1
+		params.OverrideBeaconConfig(beaconConfig)
 		localP2P := p2ptest.NewTestP2P(t)
 		service := &Service{cfg: &config{p2p: localP2P}}
@@ -96,9 +96,9 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
 		}()
 		params.SetupTestConfigCleanup(t)
-		cfg := params.BeaconConfig()
-		cfg.FuluForkEpoch = 1
-		params.OverrideBeaconConfig(cfg)
+		beaconConfig := params.BeaconConfig()
+		beaconConfig.FuluForkEpoch = 1
+		params.OverrideBeaconConfig(beaconConfig)
 		localP2P := p2ptest.NewTestP2P(t)
 		clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

View File

@@ -465,8 +465,8 @@ func SendDataColumnSidecarsByRangeRequest(
 		return nil, nil
 	}
-	cfg := params.BeaconConfig()
-	numberOfColumns := cfg.NumberOfColumns
+	beaconConfig := params.BeaconConfig()
+	numberOfColumns := beaconConfig.NumberOfColumns
 	maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
 	// Check if we do not request too many sidecars.

View File

@@ -889,9 +889,9 @@ func TestErrInvalidFetchedDataDistinction(t *testing.T) {
 func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(cfg)
+	beaconConfig := params.BeaconConfig()
+	beaconConfig.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(beaconConfig)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
 	require.NoError(t, err)
@@ -923,9 +923,9 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
 	t.Run("too many columns in request", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
-		cfg := params.BeaconConfig()
-		cfg.MaxRequestDataColumnSidecars = 0
-		params.OverrideBeaconConfig(cfg)
+		beaconConfig := params.BeaconConfig()
+		beaconConfig.MaxRequestDataColumnSidecars = 0
+		params.OverrideBeaconConfig(beaconConfig)
 		request := &ethpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
 		_, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
@@ -1193,9 +1193,9 @@ func TestIsSidecarIndexRequested(t *testing.T) {
 func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.FuluForkEpoch = 0
-	params.OverrideBeaconConfig(cfg)
+	beaconConfig := params.BeaconConfig()
+	beaconConfig.FuluForkEpoch = 0
+	params.OverrideBeaconConfig(beaconConfig)
 	params.BeaconConfig().InitializeForkSchedule()
 	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
 	require.NoError(t, err)
@@ -1223,9 +1223,9 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
 	t.Run("too many columns in request", func(t *testing.T) {
 		params.SetupTestConfigCleanup(t)
-		cfg := params.BeaconConfig()
-		cfg.MaxRequestDataColumnSidecars = 4
-		params.OverrideBeaconConfig(cfg)
+		beaconConfig := params.BeaconConfig()
+		beaconConfig.MaxRequestDataColumnSidecars = 4
+		params.OverrideBeaconConfig(beaconConfig)
 		request := p2ptypes.DataColumnsByRootIdentifiers{
 			{Columns: []uint64{1, 2, 3}},

View File

@@ -445,7 +445,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
 		custodyGroupCount = uint64(4)
 	)
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	ctx := t.Context()
 	testCases := []struct {
@@ -456,7 +456,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
 	}{
 		{
 			name: "before fulu",
-			fuluForkEpoch: cfg.FarFutureEpoch,
+			fuluForkEpoch: beaconConfig.FarFutureEpoch,
 			topic: "/eth2/beacon_chain/req/status/1/ssz_snappy",
 			streamHandler: func(service *Service, stream network.Stream, genesisState beaconState.BeaconState, beaconRoot, headRoot, finalizedRoot []byte) {
 				out := &ethpb.Status{}

View File

@@ -695,10 +695,10 @@ func (s *Service) dataColumnSubnetIndices(primitives.Slot) map[uint64]bool {
 // the validators custody requirement, and whether the node is subscribed to all data subnets.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
 func (s *Service) samplingSize() (uint64, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	if flags.Get().SubscribeAllDataSubnets {
-		return cfg.DataColumnSidecarSubnetCount, nil
+		return beaconConfig.DataColumnSidecarSubnetCount, nil
 	}
 	// Compute the validators custody requirement.
@@ -712,7 +712,7 @@ func (s *Service) samplingSize() (uint64, error) {
 		return 0, errors.Wrap(err, "custody group count")
 	}
-	return max(cfg.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
+	return max(beaconConfig.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
 }
 func (s *Service) persistentAndAggregatorSubnetIndices(currentSlot primitives.Slot) map[uint64]bool {
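The sampling size is the largest of three lower bounds. A compact sketch of that rule with illustrative mainnet-style numbers (the real inputs come from the config and the node's validator set):

```go
package main

import "fmt"

// samplingSize returns how many column subnets to sample: never fewer than the
// protocol's samples-per-slot floor, and never fewer than what the node's own
// validators or its custody group count already require.
func samplingSize(samplesPerSlot, validatorsCustodyRequirement, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, validatorsCustodyRequirement, custodyGroupCount)
}

func main() {
	fmt.Println(samplingSize(8, 4, 4))   // 8: the protocol floor dominates
	fmt.Println(samplingSize(8, 20, 12)) // 20: validator custody dominates
}
```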

View File

@@ -1,4 +0,0 @@
-### Added
-- Metrics: Add count of peers per direction and type (inbound/outbound), (TCP/QUIC).
-- `p2p_subscribed_topic_peer_total`: Reset to avoid dangling values.
-- Add `p2p_minimum_peers_per_subnet` metric.

View File

@@ -0,0 +1,4 @@
+### Fixed
+- corrected defaultRetentionEpochs in pruner
+- epoch aligned pruning: pruning should be epoch-wise. No fractional epoch pruning.

View File

@@ -1,3 +0,0 @@
-### Changed
-- Replace `time.After()` with `time.NewTimer()` and explicitly stop the timer when the context is cancelled

View File

@@ -1,3 +0,0 @@
-### Ignored
-- Use SlotTicker with offset instead of time.Ticker for attestation pool pruning to avoid conflicts with slot boundary operations