From d613f3a2621e0d16472f582581fc5678ae2f433f Mon Sep 17 00:00:00 2001
From: satushh
Date: Tue, 21 Oct 2025 14:54:52 +0100
Subject: [PATCH] Update Earliest available slot when pruning (#15694)

* Update Earliest available slot when pruning
* bazel run //:gazelle -- fix
* custodyUpdater interface to avoid import cycle
* bazel run //:gazelle -- fix
* simplify test
* separation of concerns
* debug log for updating eas
* UpdateEarliestAvailableSlot function in CustodyManager
* fix test
* UpdateEarliestAvailableSlot function for FakeP2P
* lint
* UpdateEarliestAvailableSlot instead of UpdateCustodyInfo + check for Fulu
* fix test and lint
* bugfix: enforce minimum retention period in pruner
* remove MinEpochsForBlockRequests function and use from config
* remove modifying earliest_available_slot after data column pruning
* correct earliestAvailableSlot validation: allow backfill decrease but prevent increase within MIN_EPOCHS_FOR_BLOCK_REQUESTS
* lint
* bazel run //:gazelle -- fix
* lint and remove unwanted debug logs
* Return a wrapped error, and let the caller decide what to do
* fix tests because updateEarliestSlot returns error now
* avoid re-doing computation in the test function
* lint and correct changelog
* custody updater should be a mandatory part of the pruner service
* ensure we never increase eas if we are in the block requests window
* slot-level granularity edge case
* update the value stored in the DB
* log tidy up
* use errNoCustodyInfo
* allow earliestAvailableSlot edit when custodyGroupCount doesn't change
* undo the minimal config change
* add context to CustodyGroupCount after merging from develop
* cosmetic change
* shift responsibility from caller to callee, protection for updateEarliestSlot. UpdateEarliestAvailableSlot returns cgc
* allow increase in earliestAvailableSlot only when custodyGroupCount also increases
* remove CustodyGroupCount as it is no longer needed now that UpdateEarliestAvailableSlot returns cgc
* proper place for log and name refactor
* test for nil custody info
* allow decreasing earliest slot in DB (just like in memory)
* invert if statement to make it more readable
* UpdateEarliestAvailableSlot for DB (equivalent of p2p's UpdateEarliestAvailableSlot) & undo changes made to UpdateCustodyInfo
* in UpdateEarliestAvailableSlot, no need to return unused values
* no need to log stored group count
* log.WithField instead of log.WithFields
---
 beacon-chain/blockchain/setup_test.go         |   8 +
 .../core/helpers/weak_subjectivity.go         |  11 -
 .../core/helpers/weak_subjectivity_test.go    |  17 --
 beacon-chain/db/iface/interface.go            |   1 +
 beacon-chain/db/kv/custody.go                 |  78 +++++-
 beacon-chain/db/kv/custody_test.go            | 128 +++++++++
 beacon-chain/db/pruner/BUILD.bazel            |   2 +-
 beacon-chain/db/pruner/pruner.go              |  64 ++++-
 beacon-chain/db/pruner/pruner_test.go         | 247 +++++++++++++++++-
 beacon-chain/node/node.go                     |   1 +
 beacon-chain/p2p/custody.go                   |  51 ++++
 beacon-chain/p2p/custody_test.go              | 143 ++++++++++
 beacon-chain/p2p/interfaces.go                |   1 +
 beacon-chain/p2p/testing/fuzz_p2p.go          |   5 +
 beacon-chain/p2p/testing/p2p.go               |   9 +
 beacon-chain/sync/backfill/BUILD.bazel        |   2 -
 beacon-chain/sync/backfill/service.go         |   4 +-
 beacon-chain/sync/backfill/service_test.go    |   5 +-
 changelog/satushh-update-easlot-pruning.md    |   3 +
 19 files changed, 730 insertions(+), 50 deletions(-)
 create mode 100644 changelog/satushh-update-easlot-pruning.md

diff --git a/beacon-chain/blockchain/setup_test.go b/beacon-chain/blockchain/setup_test.go
index b18640f327..7c453cf2d7 100644
--- a/beacon-chain/blockchain/setup_test.go
+++
b/beacon-chain/blockchain/setup_test.go @@ -130,6 +130,14 @@ func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitive return earliestAvailableSlot, custodyGroupCount, nil } +func (dch *mockCustodyManager) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error { + dch.mut.Lock() + defer dch.mut.Unlock() + + dch.earliestAvailableSlot = earliestAvailableSlot + return nil +} + func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 { return 0 } diff --git a/beacon-chain/core/helpers/weak_subjectivity.go b/beacon-chain/core/helpers/weak_subjectivity.go index 4260bea2d8..1280f89f6c 100644 --- a/beacon-chain/core/helpers/weak_subjectivity.go +++ b/beacon-chain/core/helpers/weak_subjectivity.go @@ -201,14 +201,3 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec Root: bRoot, }, nil } - -// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain, -// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete. -// value defined: -// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#configuration -// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months) -// detailed rationale: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs -func MinEpochsForBlockRequests() primitives.Epoch { - return params.BeaconConfig().MinValidatorWithdrawabilityDelay + - primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2) -} diff --git a/beacon-chain/core/helpers/weak_subjectivity_test.go b/beacon-chain/core/helpers/weak_subjectivity_test.go index b488008365..8cd74e7819 100644 --- a/beacon-chain/core/helpers/weak_subjectivity_test.go +++ b/beacon-chain/core/helpers/weak_subjectivity_test.go @@ -286,20 +286,3 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState { return beaconState } -func TestMinEpochsForBlockRequests(t *testing.T) { - helpers.ClearCache() - - params.SetActiveTestCleanup(t, params.MainnetConfig()) - var expected primitives.Epoch = 33024 - // expected value of 33024 via spec commentary: - // https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs - // MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT). - // - // MIN_EPOCHS_FOR_BLOCK_REQUESTS = ( - // MIN_VALIDATOR_WITHDRAWABILITY_DELAY - // + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100) - // ) - // - // Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months). - require.Equal(t, expected, helpers.MinEpochsForBlockRequests()) -} diff --git a/beacon-chain/db/iface/interface.go b/beacon-chain/db/iface/interface.go index 7595c93a86..58233ffe1b 100644 --- a/beacon-chain/db/iface/interface.go +++ b/beacon-chain/db/iface/interface.go @@ -129,6 +129,7 @@ type NoHeadAccessDatabase interface { // Custody operations. 
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) + UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error // P2P Metadata operations. SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error diff --git a/beacon-chain/db/kv/custody.go b/beacon-chain/db/kv/custody.go index 9d7fa60234..63df838bed 100644 --- a/beacon-chain/db/kv/custody.go +++ b/beacon-chain/db/kv/custody.go @@ -2,16 +2,19 @@ package kv import ( "context" + "time" + "github.com/OffchainLabs/prysm/v6/config/params" "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace" + "github.com/OffchainLabs/prysm/v6/time/slots" "github.com/pkg/errors" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) -// UpdateCustodyInfo atomically updates the custody group count only it is greater than the stored one. +// UpdateCustodyInfo atomically updates the custody group count only if it is greater than the stored one. // In this case, it also updates the earliest available slot with the provided value. // It returns the (potentially updated) custody group count and earliest available slot. func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) { @@ -70,6 +73,79 @@ func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot pri return storedEarliestAvailableSlot, storedGroupCount, nil } +// UpdateEarliestAvailableSlot updates the earliest available slot. +func (s *Store) UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error { + _, span := trace.StartSpan(ctx, "BeaconDB.UpdateEarliestAvailableSlot") + defer span.End() + + storedEarliestAvailableSlot := primitives.Slot(0) + if err := s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the custody bucket. + bucket, err := tx.CreateBucketIfNotExists(custodyBucket) + if err != nil { + return errors.Wrap(err, "create custody bucket") + } + + // Retrieve the stored earliest available slot. 
+ storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey) + if len(storedEarliestAvailableSlotBytes) != 0 { + storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes)) + } + + // Allow decrease (for backfill scenarios) + if earliestAvailableSlot <= storedEarliestAvailableSlot { + storedEarliestAvailableSlot = earliestAvailableSlot + bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot)) + if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil { + return errors.Wrap(err, "put earliest available slot") + } + return nil + } + + // Prevent increase within the MIN_EPOCHS_FOR_BLOCK_REQUESTS period + // This ensures we don't voluntarily refuse to serve mandatory block data + genesisTime := time.Unix(int64(params.BeaconConfig().MinGenesisTime+params.BeaconConfig().GenesisDelay), 0) + currentSlot := slots.CurrentSlot(genesisTime) + currentEpoch := slots.ToEpoch(currentSlot) + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + + // Calculate the minimum required epoch (or 0 if we're early in the chain) + minRequiredEpoch := primitives.Epoch(0) + if currentEpoch > minEpochsForBlocks { + minRequiredEpoch = currentEpoch - minEpochsForBlocks + } + + // Convert to slot to ensure we compare at slot-level granularity + minRequiredSlot, err := slots.EpochStart(minRequiredEpoch) + if err != nil { + return errors.Wrap(err, "calculate minimum required slot") + } + + // Prevent any increase that would put earliest available slot beyond the minimum required slot + if earliestAvailableSlot > minRequiredSlot { + return errors.Errorf( + "cannot increase earliest available slot to %d (epoch %d) as it exceeds minimum required slot %d (epoch %d)", + earliestAvailableSlot, slots.ToEpoch(earliestAvailableSlot), + minRequiredSlot, minRequiredEpoch, + ) + } + + storedEarliestAvailableSlot = earliestAvailableSlot + bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot)) + if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil { + return errors.Wrap(err, "put earliest available slot") + } + + return nil + }); err != nil { + return err + } + + log.WithField("earliestAvailableSlot", storedEarliestAvailableSlot).Debug("Updated earliest available slot") + + return nil +} + // UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database // only if `subscribed` is `true`. // It returns the previous subscription status. 
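Both the kv-store function above and its p2p-service counterpart later in this patch enforce the same rule: decreases of the earliest available slot are always allowed (backfill discovered older data), while increases may never cross into the window of blocks that peers are required to serve. A minimal standalone sketch of that rule, using plain uint64 values and a hard-coded mainnet MIN_EPOCHS_FOR_BLOCK_REQUESTS in place of Prysm's primitives and params types (the helper name is illustrative, not part of the patch):

package main

import "fmt"

const (
	minEpochsForBlockRequests = 33024 // mainnet MIN_EPOCHS_FOR_BLOCK_REQUESTS (~5 months)
	slotsPerEpoch             = 32    // mainnet SLOTS_PER_EPOCH
)

// validateEarliestSlotUpdate mirrors the validation in the patch: a decrease
// (or no-op) always passes, while an increase is rejected if it would move the
// earliest available slot past the start of
// currentEpoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS, i.e. into the mandatory
// block-serving window. The comparison is done at slot granularity.
func validateEarliestSlotUpdate(stored, proposed, currentSlot uint64) error {
	if proposed <= stored {
		return nil // decrease or no-op: always permitted (backfill scenario)
	}
	currentEpoch := currentSlot / slotsPerEpoch
	var minRequiredEpoch uint64
	if currentEpoch > minEpochsForBlockRequests {
		minRequiredEpoch = currentEpoch - minEpochsForBlockRequests
	}
	minRequiredSlot := minRequiredEpoch * slotsPerEpoch
	if proposed > minRequiredSlot {
		return fmt.Errorf("cannot increase earliest available slot to %d: exceeds minimum required slot %d", proposed, minRequiredSlot)
	}
	return nil
}

func main() {
	// currentSlot puts us at epoch 34000; the mandatory window then starts at
	// epoch 976 (slot 31232), so raising the earliest slot to 2000000 is rejected.
	if err := validateEarliestSlotUpdate(100, 2000000, 34000*slotsPerEpoch); err != nil {
		fmt.Println(err)
	}
}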
diff --git a/beacon-chain/db/kv/custody_test.go b/beacon-chain/db/kv/custody_test.go index 3c1c371731..6db0148109 100644 --- a/beacon-chain/db/kv/custody_test.go +++ b/beacon-chain/db/kv/custody_test.go @@ -3,10 +3,13 @@ package kv import ( "context" "testing" + "time" + "github.com/OffchainLabs/prysm/v6/config/params" "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" "github.com/OffchainLabs/prysm/v6/testing/require" + "github.com/OffchainLabs/prysm/v6/time/slots" bolt "go.etcd.io/bbolt" ) @@ -132,6 +135,131 @@ func TestUpdateCustodyInfo(t *testing.T) { }) } +func TestUpdateEarliestAvailableSlot(t *testing.T) { + ctx := t.Context() + + t.Run("allow decreasing earliest slot (backfill scenario)", func(t *testing.T) { + const ( + initialSlot = primitives.Slot(300) + initialCount = uint64(10) + earliestSlot = primitives.Slot(200) // Lower than initial (backfill discovered earlier blocks) + ) + + db := setupDB(t) + + // Initialize custody info + _, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount) + require.NoError(t, err) + + // Update with a lower slot (should update for backfill) + err = db.UpdateEarliestAvailableSlot(ctx, earliestSlot) + require.NoError(t, err) + + storedSlot, storedCount := getCustodyInfoFromDB(t, db) + require.Equal(t, earliestSlot, storedSlot) + require.Equal(t, initialCount, storedCount) + }) + + t.Run("allow increasing slot within MIN_EPOCHS_FOR_BLOCK_REQUESTS (pruning scenario)", func(t *testing.T) { + db := setupDB(t) + + // Calculate the current slot and minimum required slot based on actual current time + genesisTime := time.Unix(int64(params.BeaconConfig().MinGenesisTime+params.BeaconConfig().GenesisDelay), 0) + currentSlot := slots.CurrentSlot(genesisTime) + currentEpoch := slots.ToEpoch(currentSlot) + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + + var minRequiredEpoch primitives.Epoch + if currentEpoch > minEpochsForBlocks { + minRequiredEpoch = currentEpoch - minEpochsForBlocks + } else { + minRequiredEpoch = 0 + } + + minRequiredSlot, err := slots.EpochStart(minRequiredEpoch) + require.NoError(t, err) + + // Initial setup: set earliest slot well before minRequiredSlot + const groupCount = uint64(5) + initialSlot := primitives.Slot(1000) + + _, _, err = db.UpdateCustodyInfo(ctx, initialSlot, groupCount) + require.NoError(t, err) + + // Try to increase to a slot that's still BEFORE minRequiredSlot (should succeed) + validSlot := minRequiredSlot - 100 + + err = db.UpdateEarliestAvailableSlot(ctx, validSlot) + require.NoError(t, err) + + // Verify the database was updated + storedSlot, storedCount := getCustodyInfoFromDB(t, db) + require.Equal(t, validSlot, storedSlot) + require.Equal(t, groupCount, storedCount) + }) + + t.Run("prevent increasing slot beyond MIN_EPOCHS_FOR_BLOCK_REQUESTS", func(t *testing.T) { + db := setupDB(t) + + // Calculate the current slot and minimum required slot based on actual current time + genesisTime := time.Unix(int64(params.BeaconConfig().MinGenesisTime+params.BeaconConfig().GenesisDelay), 0) + currentSlot := slots.CurrentSlot(genesisTime) + currentEpoch := slots.ToEpoch(currentSlot) + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + + var minRequiredEpoch primitives.Epoch + if currentEpoch > minEpochsForBlocks { + minRequiredEpoch = currentEpoch - minEpochsForBlocks + } else { + minRequiredEpoch = 0 + } + + minRequiredSlot, err := slots.EpochStart(minRequiredEpoch) + 
require.NoError(t, err)
+
+		// Initial setup: set a valid earliest slot (well before minRequiredSlot)
+		const initialCount = uint64(5)
+		initialSlot := primitives.Slot(1000)
+
+		_, _, err = db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
+		require.NoError(t, err)
+
+		// Try to set earliest slot beyond the minimum required slot
+		invalidSlot := minRequiredSlot + 100
+
+		// This should fail
+		err = db.UpdateeEarliestAvailableSlot(ctx, invalidSlot)
+		require.ErrorContains(t, "cannot increase earliest available slot", err)
+		require.ErrorContains(t, "exceeds minimum required slot", err)
+
+		// Verify the database wasn't updated
+		storedSlot, storedCount := getCustodyInfoFromDB(t, db)
+		require.Equal(t, initialSlot, storedSlot)
+		require.Equal(t, initialCount, storedCount)
+	})
+
+	t.Run("no change when slot equals current slot", func(t *testing.T) {
+		const (
+			initialSlot  = primitives.Slot(100)
+			initialCount = uint64(5)
+		)
+
+		db := setupDB(t)
+
+		// Initialize custody info
+		_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
+		require.NoError(t, err)
+
+		// Update with the same slot
+		err = db.UpdateEarliestAvailableSlot(ctx, initialSlot)
+		require.NoError(t, err)
+
+		storedSlot, storedCount := getCustodyInfoFromDB(t, db)
+		require.Equal(t, initialSlot, storedSlot)
+		require.Equal(t, initialCount, storedCount)
+	})
+}
+
 func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
 	ctx := context.Background()
diff --git a/beacon-chain/db/pruner/BUILD.bazel b/beacon-chain/db/pruner/BUILD.bazel
index 21e31bb7d0..aea71592ef 100644
--- a/beacon-chain/db/pruner/BUILD.bazel
+++ b/beacon-chain/db/pruner/BUILD.bazel
@@ -8,7 +8,6 @@ go_library(
         "//beacon-chain:__subpackages__",
     ],
     deps = [
-        "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/iface:go_default_library",
         "//config/params:go_default_library",
@@ -29,6 +28,7 @@ go_test(
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
+       "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots/testing:go_default_library",
diff --git a/beacon-chain/db/pruner/pruner.go b/beacon-chain/db/pruner/pruner.go
index 2b5cb02d59..6e3699a9e6 100644
--- a/beacon-chain/db/pruner/pruner.go
+++ b/beacon-chain/db/pruner/pruner.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"time"
 
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
 	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -25,17 +24,24 @@ const (
 	defaultNumBatchesToPrune = 15
 )
 
+// custodyUpdater is a tiny interface that the p2p service implements; it is kept here to avoid
+// importing the p2p package and creating an import cycle.
+type custodyUpdater interface {
+	UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error
+}
+
 type ServiceOption func(*Service)
 
 // WithRetentionPeriod allows the user to specify a different data retention period than the spec default.
 // The retention period is specified in epochs, and must be >= MIN_EPOCHS_FOR_BLOCK_REQUESTS.
func WithRetentionPeriod(retentionEpochs primitives.Epoch) ServiceOption { return func(s *Service) { - defaultRetentionEpochs := helpers.MinEpochsForBlockRequests() + 1 + defaultRetentionEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + 1 if retentionEpochs < defaultRetentionEpochs { log.WithField("userEpochs", retentionEpochs). WithField("minRequired", defaultRetentionEpochs). - Warn("Retention period too low, using minimum required value") + Warn("Retention period too low, ignoring and using minimum required value") + retentionEpochs = defaultRetentionEpochs } s.ps = pruneStartSlotFunc(retentionEpochs) @@ -58,17 +64,23 @@ type Service struct { slotTicker slots.Ticker backfillWaiter func() error initSyncWaiter func() error + custody custodyUpdater } -func New(ctx context.Context, db iface.Database, genesisTime time.Time, initSyncWaiter, backfillWaiter func() error, opts ...ServiceOption) (*Service, error) { +func New(ctx context.Context, db iface.Database, genesisTime time.Time, initSyncWaiter, backfillWaiter func() error, custody custodyUpdater, opts ...ServiceOption) (*Service, error) { + if custody == nil { + return nil, errors.New("custody updater is required for pruner but was not provided") + } + p := &Service{ ctx: ctx, db: db, - ps: pruneStartSlotFunc(helpers.MinEpochsForBlockRequests() + 1), // Default retention epochs is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 from the current slot. + ps: pruneStartSlotFunc(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + 1), // Default retention epochs is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 from the current slot. done: make(chan struct{}), slotTicker: slots.NewSlotTicker(slots.UnsafeStartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot), initSyncWaiter: initSyncWaiter, backfillWaiter: backfillWaiter, + custody: custody, } for _, o := range opts { @@ -157,17 +169,45 @@ func (p *Service) prune(slot primitives.Slot) error { return errors.Wrap(err, "failed to prune batches") } - log.WithFields(logrus.Fields{ - "prunedUpto": pruneUpto, - "duration": time.Since(tt), - "currentSlot": slot, - "batchSize": defaultPrunableBatchSize, - "numBatches": numBatches, - }).Debug("Successfully pruned chain data") + earliestAvailableSlot := pruneUpto + 1 // Update pruning checkpoint. p.prunedUpto = pruneUpto + // Update the earliest available slot after pruning + if err := p.updateEarliestAvailableSlot(earliestAvailableSlot); err != nil { + return errors.Wrap(err, "update earliest available slot") + } + + log.WithFields(logrus.Fields{ + "prunedUpto": pruneUpto, + "earliestAvailableSlot": earliestAvailableSlot, + "duration": time.Since(tt), + "currentSlot": slot, + "batchSize": defaultPrunableBatchSize, + "numBatches": numBatches, + }).Debug("Successfully pruned chain data") + + return nil +} + +// updateEarliestAvailableSlot updates the earliest available slot via the injected custody updater +// and also persists it to the database. 
+func (p *Service) updateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error { + if !params.FuluEnabled() { + return nil + } + + // Update the p2p in-memory state + if err := p.custody.UpdateEarliestAvailableSlot(earliestAvailableSlot); err != nil { + return errors.Wrapf(err, "update earliest available slot after pruning to %d", earliestAvailableSlot) + } + + // Persist to database to ensure it survives restarts + if err := p.db.UpdateEarliestAvailableSlot(p.ctx, earliestAvailableSlot); err != nil { + return errors.Wrapf(err, "update earliest available slot in database for slot %d", earliestAvailableSlot) + } + return nil } diff --git a/beacon-chain/db/pruner/pruner_test.go b/beacon-chain/db/pruner/pruner_test.go index e0cc1d254d..55e3c109a2 100644 --- a/beacon-chain/db/pruner/pruner_test.go +++ b/beacon-chain/db/pruner/pruner_test.go @@ -2,6 +2,7 @@ package pruner import ( "context" + "errors" "testing" "time" @@ -15,6 +16,7 @@ import ( dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" + "github.com/OffchainLabs/prysm/v6/testing/assert" "github.com/OffchainLabs/prysm/v6/testing/require" logTest "github.com/sirupsen/logrus/hooks/test" ) @@ -62,7 +64,9 @@ func TestPruner_PruningConditions(t *testing.T) { if !tt.backfillCompleted { backfillWaiter = waiter } - p, err := New(ctx, beaconDB, time.Now(), initSyncWaiter, backfillWaiter, WithSlotTicker(slotTicker)) + + mockCustody := &mockCustodyUpdater{} + p, err := New(ctx, beaconDB, time.Now(), initSyncWaiter, backfillWaiter, mockCustody, WithSlotTicker(slotTicker)) require.NoError(t, err) go p.Start() @@ -97,12 +101,14 @@ func TestPruner_PruneSuccess(t *testing.T) { retentionEpochs := primitives.Epoch(2) slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)} + mockCustody := &mockCustodyUpdater{} p, err := New( ctx, beaconDB, time.Now(), nil, nil, + mockCustody, WithSlotTicker(slotTicker), ) require.NoError(t, err) @@ -133,3 +139,242 @@ func TestPruner_PruneSuccess(t *testing.T) { require.NoError(t, p.Stop()) } + +// Mock custody updater for testing +type mockCustodyUpdater struct { + custodyGroupCount uint64 + earliestAvailableSlot primitives.Slot + updateCallCount int +} + +func (m *mockCustodyUpdater) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error { + m.updateCallCount++ + m.earliestAvailableSlot = earliestAvailableSlot + return nil +} + +func TestPruner_UpdatesEarliestAvailableSlot(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.FuluForkEpoch = 0 // Enable Fulu from epoch 0 + params.OverrideBeaconConfig(config) + + logrus.SetLevel(logrus.DebugLevel) + hook := logTest.NewGlobal() + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + beaconDB := dbtest.SetupDB(t) + retentionEpochs := primitives.Epoch(2) + + slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)} + + // Create mock custody updater + mockCustody := &mockCustodyUpdater{ + custodyGroupCount: 4, + earliestAvailableSlot: 0, + } + + // Create pruner with mock custody updater + p, err := New( + ctx, + beaconDB, + time.Now(), + nil, + nil, + mockCustody, + WithSlotTicker(slotTicker), + ) + require.NoError(t, err) + + p.ps = func(current primitives.Slot) primitives.Slot { + return current - primitives.Slot(retentionEpochs)*params.BeaconConfig().SlotsPerEpoch + } + + // Save some blocks to be pruned + for i := primitives.Slot(1); i <= 32; i++ { + blk := util.NewBeaconBlock() + 
blk.Block.Slot = i + wsb, err := blocks.NewSignedBeaconBlock(blk) + require.NoError(t, err) + require.NoError(t, beaconDB.SaveBlock(ctx, wsb)) + } + + // Start pruner and trigger at slot 80 (middle of 3rd epoch) + go p.Start() + currentSlot := primitives.Slot(80) + slotTicker.Channel <- currentSlot + + // Wait for pruning to complete + time.Sleep(100 * time.Millisecond) + + // Check that UpdateEarliestAvailableSlot was called + assert.Equal(t, true, mockCustody.updateCallCount > 0, "UpdateEarliestAvailableSlot should have been called") + + // The earliest available slot should be pruneUpto + 1 + // pruneUpto = currentSlot - retentionEpochs*slotsPerEpoch = 80 - 2*32 = 16 + // So earliest available slot should be 16 + 1 = 17 + expectedEarliestSlot := primitives.Slot(17) + require.Equal(t, expectedEarliestSlot, mockCustody.earliestAvailableSlot, "Earliest available slot should be updated correctly") + require.Equal(t, uint64(4), mockCustody.custodyGroupCount, "Custody group count should be preserved") + + // Verify that no error was logged + for _, entry := range hook.AllEntries() { + if entry.Level == logrus.ErrorLevel { + t.Errorf("Unexpected error log: %s", entry.Message) + } + } + + require.NoError(t, p.Stop()) +} + +// Mock custody updater that returns an error for UpdateEarliestAvailableSlot +type mockCustodyUpdaterWithUpdateError struct { + updateCallCount int +} + +func (m *mockCustodyUpdaterWithUpdateError) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error { + m.updateCallCount++ + return errors.New("failed to update earliest available slot") +} + +func TestWithRetentionPeriod_EnforcesMinimum(t *testing.T) { + // Use minimal config for testing + params.SetupTestConfigCleanup(t) + config := params.MinimalSpecConfig() + params.OverrideBeaconConfig(config) + + ctx := t.Context() + beaconDB := dbtest.SetupDB(t) + + // Get the minimum required epochs (272 + 1 = 273 for minimal) + minRequiredEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests + 1) + + // Use a slot that's guaranteed to be after the minimum retention period + currentSlot := primitives.Slot(minRequiredEpochs+100) * (params.BeaconConfig().SlotsPerEpoch) + + tests := []struct { + name string + userRetentionEpochs primitives.Epoch + expectedPruneSlot primitives.Slot + description string + }{ + { + name: "User value below minimum - should use minimum", + userRetentionEpochs: 2, // Way below minimum + expectedPruneSlot: currentSlot - primitives.Slot(minRequiredEpochs)*params.BeaconConfig().SlotsPerEpoch, + description: "Should use minimum when user value is too low", + }, + { + name: "User value at minimum", + userRetentionEpochs: minRequiredEpochs, + expectedPruneSlot: currentSlot - primitives.Slot(minRequiredEpochs)*params.BeaconConfig().SlotsPerEpoch, + description: "Should use user value when at minimum", + }, + { + name: "User value above minimum", + userRetentionEpochs: minRequiredEpochs + 10, + expectedPruneSlot: currentSlot - primitives.Slot(minRequiredEpochs+10)*params.BeaconConfig().SlotsPerEpoch, + description: "Should use user value when above minimum", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hook := logTest.NewGlobal() + logrus.SetLevel(logrus.WarnLevel) + + mockCustody := &mockCustodyUpdater{} + // Create pruner with retention period + p, err := New( + ctx, + beaconDB, + time.Now(), + nil, + nil, + mockCustody, + WithRetentionPeriod(tt.userRetentionEpochs), + ) + require.NoError(t, err) + + // Test the pruning calculation + 
pruneUptoSlot := p.ps(currentSlot) + + // Verify the pruning slot + assert.Equal(t, tt.expectedPruneSlot, pruneUptoSlot, tt.description) + + // Check if warning was logged when value was too low + if tt.userRetentionEpochs < minRequiredEpochs { + assert.LogsContain(t, hook, "Retention period too low, ignoring and using minimum required value") + } + }) + } +} + +func TestPruner_UpdateEarliestSlotError(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.FuluForkEpoch = 0 // Enable Fulu from epoch 0 + params.OverrideBeaconConfig(config) + + logrus.SetLevel(logrus.DebugLevel) + hook := logTest.NewGlobal() + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + beaconDB := dbtest.SetupDB(t) + retentionEpochs := primitives.Epoch(2) + + slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)} + + // Create mock custody updater that returns an error for UpdateEarliestAvailableSlot + mockCustody := &mockCustodyUpdaterWithUpdateError{} + + // Create pruner with mock custody updater + p, err := New( + ctx, + beaconDB, + time.Now(), + nil, + nil, + mockCustody, + WithSlotTicker(slotTicker), + ) + require.NoError(t, err) + + p.ps = func(current primitives.Slot) primitives.Slot { + return current - primitives.Slot(retentionEpochs)*params.BeaconConfig().SlotsPerEpoch + } + + // Save some blocks to be pruned + for i := primitives.Slot(1); i <= 32; i++ { + blk := util.NewBeaconBlock() + blk.Block.Slot = i + wsb, err := blocks.NewSignedBeaconBlock(blk) + require.NoError(t, err) + require.NoError(t, beaconDB.SaveBlock(ctx, wsb)) + } + + // Start pruner and trigger at slot 80 + go p.Start() + currentSlot := primitives.Slot(80) + slotTicker.Channel <- currentSlot + + // Wait for pruning to complete + time.Sleep(100 * time.Millisecond) + + // Should have called UpdateEarliestAvailableSlot + assert.Equal(t, 1, mockCustody.updateCallCount, "UpdateEarliestAvailableSlot should be called") + + // Check that error was logged by the prune function + found := false + for _, entry := range hook.AllEntries() { + if entry.Level == logrus.ErrorLevel && entry.Message == "Failed to prune database" { + found = true + break + } + } + assert.Equal(t, true, found, "Should log error when UpdateEarliestAvailableSlot fails") + + require.NoError(t, p.Stop()) +} diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index 36678afb70..6b305d8d49 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -1108,6 +1108,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error { genesis, initSyncWaiter(cliCtx.Context, b.initialSyncComplete), backfillService.WaitForCompletion, + b.fetchP2P(), opts..., ) if err != nil { diff --git a/beacon-chain/p2p/custody.go b/beacon-chain/p2p/custody.go index 0ca21da26d..2318b9aaad 100644 --- a/beacon-chain/p2p/custody.go +++ b/beacon-chain/p2p/custody.go @@ -115,6 +115,57 @@ func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo return earliestAvailableSlot, custodyGroupCount, nil } +// UpdateEarliestAvailableSlot updates the earliest available slot. +// +// IMPORTANT: This function should only be called when Fulu is enabled. The caller is responsible +// for checking params.FuluEnabled() before calling this function. 
+func (s *Service) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error { + s.custodyInfoLock.Lock() + defer s.custodyInfoLock.Unlock() + + if s.custodyInfo == nil { + return errors.New("no custody info available") + } + + currentSlot := slots.CurrentSlot(s.genesisTime) + currentEpoch := slots.ToEpoch(currentSlot) + + // Allow decrease (for backfill scenarios) + if earliestAvailableSlot < s.custodyInfo.earliestAvailableSlot { + s.custodyInfo.earliestAvailableSlot = earliestAvailableSlot + return nil + } + + // Prevent increase within the MIN_EPOCHS_FOR_BLOCK_REQUESTS period + // This ensures we don't voluntarily refuse to serve mandatory block data + // This check applies regardless of whether we're early or late in the chain + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + + // Calculate the minimum required epoch (or 0 if we're early in the chain) + minRequiredEpoch := primitives.Epoch(0) + if currentEpoch > minEpochsForBlocks { + minRequiredEpoch = currentEpoch - minEpochsForBlocks + } + + // Convert to slot to ensure we compare at slot-level granularity, not epoch-level + // This prevents allowing increases to slots within minRequiredEpoch that are after its first slot + minRequiredSlot, err := slots.EpochStart(minRequiredEpoch) + if err != nil { + return errors.Wrap(err, "epoch start") + } + + // Prevent any increase that would put earliest slot beyond the minimum required slot + if earliestAvailableSlot > s.custodyInfo.earliestAvailableSlot && earliestAvailableSlot > minRequiredSlot { + return errors.Errorf( + "cannot increase earliest available slot to %d (epoch %d) as it exceeds minimum required slot %d (epoch %d)", + earliestAvailableSlot, slots.ToEpoch(earliestAvailableSlot), minRequiredSlot, minRequiredEpoch, + ) + } + + s.custodyInfo.earliestAvailableSlot = earliestAvailableSlot + return nil +} + // CustodyGroupCountFromPeer retrieves custody group count from a peer. 
// It first tries to get the custody group count from the peer's metadata, // then falls back to the ENR value if the metadata is not available, then diff --git a/beacon-chain/p2p/custody_test.go b/beacon-chain/p2p/custody_test.go index 0a875e1127..5c6b3b29e7 100644 --- a/beacon-chain/p2p/custody_test.go +++ b/beacon-chain/p2p/custody_test.go @@ -4,6 +4,7 @@ import ( "context" "strings" "testing" + "time" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers" @@ -167,6 +168,148 @@ func TestUpdateCustodyInfo(t *testing.T) { } } +func TestUpdateEarliestAvailableSlot(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.FuluForkEpoch = 0 // Enable Fulu from epoch 0 + params.OverrideBeaconConfig(config) + + t.Run("Valid update", func(t *testing.T) { + const ( + initialSlot primitives.Slot = 50 + newSlot primitives.Slot = 100 + groupCount uint64 = 5 + ) + + // Set up a scenario where we're far enough in the chain that increasing to newSlot is valid + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + currentEpoch := minEpochsForBlocks + 100 // Well beyond MIN_EPOCHS_FOR_BLOCK_REQUESTS + currentSlot := primitives.Slot(currentEpoch) * primitives.Slot(params.BeaconConfig().SlotsPerEpoch) + + service := &Service{ + // Set genesis time in the past so currentSlot is the "current" slot + genesisTime: time.Now().Add(-time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), + custodyInfo: &custodyInfo{ + earliestAvailableSlot: initialSlot, + groupCount: groupCount, + }, + } + + err := service.UpdateEarliestAvailableSlot(newSlot) + + require.NoError(t, err) + require.Equal(t, newSlot, service.custodyInfo.earliestAvailableSlot) + require.Equal(t, groupCount, service.custodyInfo.groupCount) // Should preserve group count + }) + + t.Run("Earlier slot - allowed for backfill", func(t *testing.T) { + const initialSlot primitives.Slot = 100 + const earlierSlot primitives.Slot = 50 + + service := &Service{ + genesisTime: time.Now(), + custodyInfo: &custodyInfo{ + earliestAvailableSlot: initialSlot, + groupCount: 5, + }, + } + + err := service.UpdateEarliestAvailableSlot(earlierSlot) + + require.NoError(t, err) + require.Equal(t, earlierSlot, service.custodyInfo.earliestAvailableSlot) // Should decrease for backfill + }) + + t.Run("Prevent increase within MIN_EPOCHS_FOR_BLOCK_REQUESTS - late in chain", func(t *testing.T) { + // Set current time far enough in the future to have a meaningful MIN_EPOCHS_FOR_BLOCK_REQUESTS period + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + currentEpoch := minEpochsForBlocks + 100 // Well beyond the minimum + currentSlot := primitives.Slot(currentEpoch) * primitives.Slot(params.BeaconConfig().SlotsPerEpoch) + + // Calculate the minimum allowed epoch + minRequiredEpoch := currentEpoch - minEpochsForBlocks + minRequiredSlot := primitives.Slot(minRequiredEpoch) * primitives.Slot(params.BeaconConfig().SlotsPerEpoch) + + // Try to set earliest slot to a value within the MIN_EPOCHS_FOR_BLOCK_REQUESTS period (should fail) + attemptedSlot := minRequiredSlot + 1000 // Within the mandatory retention period + + service := &Service{ + genesisTime: time.Now().Add(-time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), + custodyInfo: &custodyInfo{ + earliestAvailableSlot: minRequiredSlot - 100, // Current value is before the min 
required + groupCount: 5, + }, + } + + err := service.UpdateEarliestAvailableSlot(attemptedSlot) + + require.NotNil(t, err) + require.Equal(t, true, strings.Contains(err.Error(), "cannot increase earliest available slot")) + }) + + t.Run("Prevent increase at epoch boundary - slot precision matters", func(t *testing.T) { + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + currentEpoch := minEpochsForBlocks + 976 // Current epoch + currentSlot := primitives.Slot(currentEpoch) * primitives.Slot(params.BeaconConfig().SlotsPerEpoch) + + minRequiredEpoch := currentEpoch - minEpochsForBlocks // = 976 + storedEarliestSlot := primitives.Slot(minRequiredEpoch)*primitives.Slot(params.BeaconConfig().SlotsPerEpoch) - 232 // Before minRequired + + // Try to set earliest to slot 8 of the minRequiredEpoch (should fail with slot comparison) + attemptedSlot := primitives.Slot(minRequiredEpoch)*primitives.Slot(params.BeaconConfig().SlotsPerEpoch) + 8 + + service := &Service{ + genesisTime: time.Now().Add(-time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), + custodyInfo: &custodyInfo{ + earliestAvailableSlot: storedEarliestSlot, + groupCount: 5, + }, + } + + err := service.UpdateEarliestAvailableSlot(attemptedSlot) + + require.NotNil(t, err, "Should prevent increasing earliest slot beyond the minimum required SLOT (not just epoch)") + require.Equal(t, true, strings.Contains(err.Error(), "cannot increase earliest available slot")) + }) + + t.Run("Prevent increase within MIN_EPOCHS_FOR_BLOCK_REQUESTS - early in chain", func(t *testing.T) { + minEpochsForBlocks := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + currentEpoch := minEpochsForBlocks - 10 // Early in chain, BEFORE we have MIN_EPOCHS_FOR_BLOCK_REQUESTS of history + currentSlot := primitives.Slot(currentEpoch) * primitives.Slot(params.BeaconConfig().SlotsPerEpoch) + + // Current earliest slot is at slot 100 + currentEarliestSlot := primitives.Slot(100) + + // Try to increase earliest slot to slot 1000 (which would be within the mandatory window from currentSlot) + attemptedSlot := primitives.Slot(1000) + + service := &Service{ + genesisTime: time.Now().Add(-time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), + custodyInfo: &custodyInfo{ + earliestAvailableSlot: currentEarliestSlot, + groupCount: 5, + }, + } + + err := service.UpdateEarliestAvailableSlot(attemptedSlot) + + require.NotNil(t, err, "Should prevent increasing earliest slot within the mandatory retention window, even early in chain") + require.Equal(t, true, strings.Contains(err.Error(), "cannot increase earliest available slot")) + }) + + t.Run("Nil custody info - should return error", func(t *testing.T) { + service := &Service{ + genesisTime: time.Now(), + custodyInfo: nil, // No custody info set + } + + err := service.UpdateEarliestAvailableSlot(100) + + require.NotNil(t, err) + require.Equal(t, true, strings.Contains(err.Error(), "no custody info available")) + }) +} + func TestCustodyGroupCountFromPeer(t *testing.T) { const ( expectedENR uint64 = 7 diff --git a/beacon-chain/p2p/interfaces.go b/beacon-chain/p2p/interfaces.go index f648bb53cc..2d2d0993d3 100644 --- a/beacon-chain/p2p/interfaces.go +++ b/beacon-chain/p2p/interfaces.go @@ -126,6 +126,7 @@ type ( EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error) CustodyGroupCount(ctx context.Context) (uint64, error) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, 
custodyGroupCount uint64) (primitives.Slot, uint64, error)
+		UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error
 		CustodyGroupCountFromPeer(peer.ID) uint64
 	}
 )
diff --git a/beacon-chain/p2p/testing/fuzz_p2p.go b/beacon-chain/p2p/testing/fuzz_p2p.go
index 3bfea3c5ce..b9dbb71e41 100644
--- a/beacon-chain/p2p/testing/fuzz_p2p.go
+++ b/beacon-chain/p2p/testing/fuzz_p2p.go
@@ -213,6 +213,11 @@ func (s *FakeP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
 	return earliestAvailableSlot, custodyGroupCount, nil
 }
 
+// UpdateEarliestAvailableSlot -- fake.
+func (*FakeP2P) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error {
+	return nil
+}
+
 // CustodyGroupCountFromPeer -- fake.
 func (*FakeP2P) CustodyGroupCountFromPeer(peer.ID) uint64 {
 	return 0
diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go
index b4beaf7633..641172ea07 100644
--- a/beacon-chain/p2p/testing/p2p.go
+++ b/beacon-chain/p2p/testing/p2p.go
@@ -499,6 +499,15 @@ func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
 	return s.earliestAvailableSlot, s.custodyGroupCount, nil
 }
 
+// UpdateEarliestAvailableSlot updates the earliest available slot on the test implementation.
+func (s *TestP2P) UpdateEarliestAvailableSlot(earliestAvailableSlot primitives.Slot) error {
+	s.custodyInfoMut.Lock()
+	defer s.custodyInfoMut.Unlock()
+
+	s.earliestAvailableSlot = earliestAvailableSlot
+	return nil
+}
+
 // CustodyGroupCountFromPeer retrieves custody group count from a peer.
 // It first tries to get the custody group count from the peer's metadata,
 // then falls back to the ENR value if the metadata is not available, then
diff --git a/beacon-chain/sync/backfill/BUILD.bazel b/beacon-chain/sync/backfill/BUILD.bazel
index 053ee571c9..bbd2691dc4 100644
--- a/beacon-chain/sync/backfill/BUILD.bazel
+++ b/beacon-chain/sync/backfill/BUILD.bazel
@@ -17,7 +17,6 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill",
     visibility = ["//visibility:public"],
     deps = [
-        "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/das:go_default_library",
         "//beacon-chain/db:go_default_library",
@@ -61,7 +60,6 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/das:go_default_library",
         "//beacon-chain/db:go_default_library",
diff --git a/beacon-chain/sync/backfill/service.go b/beacon-chain/sync/backfill/service.go
index 2bf7d4f1f3..fcbd0086fe 100644
--- a/beacon-chain/sync/backfill/service.go
+++ b/beacon-chain/sync/backfill/service.go
@@ -3,12 +3,12 @@ package backfill
 import (
 	"context"
 
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
+	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -348,7 +348,7 @@ func (*Service) Status() error {
 
 // minimumBackfillSlot determines the lowest slot that backfill needs to download based on looking back
 // MIN_EPOCHS_FOR_BLOCK_REQUESTS from the current slot.
 func minimumBackfillSlot(current primitives.Slot) primitives.Slot {
-	oe := helpers.MinEpochsForBlockRequests()
+	oe := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 	if oe > slots.MaxSafeEpoch() {
 		oe = slots.MaxSafeEpoch()
 	}
diff --git a/beacon-chain/sync/backfill/service_test.go b/beacon-chain/sync/backfill/service_test.go
index d6ba959837..59efb21f0e 100644
--- a/beacon-chain/sync/backfill/service_test.go
+++ b/beacon-chain/sync/backfill/service_test.go
@@ -5,7 +5,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
 	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
@@ -84,7 +83,7 @@ func TestServiceInit(t *testing.T) {
 }
 
 func TestMinimumBackfillSlot(t *testing.T) {
-	oe := helpers.MinEpochsForBlockRequests()
+	oe := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 	currSlot := (oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
 	minSlot := minimumBackfillSlot(primitives.Slot(currSlot))
@@ -109,7 +108,7 @@ func testReadN(ctx context.Context, t *testing.T, c chan batch, n int, into []ba
 }
 
 func TestBackfillMinSlotDefault(t *testing.T) {
-	oe := helpers.MinEpochsForBlockRequests()
+	oe := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 	current := primitives.Slot((oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
 	s := &Service{}
 	specMin := minimumBackfillSlot(current)
diff --git a/changelog/satushh-update-easlot-pruning.md b/changelog/satushh-update-easlot-pruning.md
new file mode 100644
index 0000000000..2944788dfc
--- /dev/null
+++ b/changelog/satushh-update-easlot-pruning.md
@@ -0,0 +1,3 @@
+### Added
+
+- Update the earliest available slot after pruning operations in the beacon chain database pruner. This ensures the P2P layer accurately knows which historical data is available after pruning, preventing nodes from advertising or attempting to serve data that has already been pruned.
\ No newline at end of file
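Taken together, the patch establishes a simple post-prune flow: prune blocks up to pruneUpto, then propagate pruneUpto + 1 first to the p2p layer's in-memory custody info and then to the database. A compact sketch of that flow, with simplified stand-in interfaces and uint64 slots instead of the real Service, p2p, and iface.Database types (all names here are illustrative):

package main

import (
	"context"
	"fmt"
)

// custodyUpdater stands in for the pruner's p2p-facing interface from the patch.
type custodyUpdater interface {
	UpdateEarliestAvailableSlot(earliestAvailableSlot uint64) error
}

// custodyDB stands in for the database's UpdateEarliestAvailableSlot method.
type custodyDB interface {
	UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot uint64) error
}

// propagateEarliestSlot mirrors Service.updateEarliestAvailableSlot: the in-memory
// p2p custody info is updated first (so peers immediately see accurate metadata),
// then the value is persisted so it survives restarts. Either failure is wrapped
// and returned, leaving the caller to decide how to handle it.
func propagateEarliestSlot(ctx context.Context, c custodyUpdater, db custodyDB, prunedUpto uint64) error {
	earliest := prunedUpto + 1
	if err := c.UpdateEarliestAvailableSlot(earliest); err != nil {
		return fmt.Errorf("update in-memory earliest available slot to %d: %w", earliest, err)
	}
	if err := db.UpdateEarliestAvailableSlot(ctx, earliest); err != nil {
		return fmt.Errorf("persist earliest available slot %d: %w", earliest, err)
	}
	return nil
}

type memCustody struct{ earliest uint64 }

func (m *memCustody) UpdateEarliestAvailableSlot(s uint64) error { m.earliest = s; return nil }

type memDB struct{ earliest uint64 }

func (d *memDB) UpdateEarliestAvailableSlot(_ context.Context, s uint64) error { d.earliest = s; return nil }

func main() {
	c, d := &memCustody{}, &memDB{}
	if err := propagateEarliestSlot(context.Background(), c, d, 16); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c.earliest, d.earliest) // 17 17, matching the pruner test's expectation
}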