Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: lite-super...dont-gen-s (31 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 503c1ca465 | |
| | fa056c2d21 | |
| | 61de11e2c4 | |
| | 2773bdef89 | |
| | 2a23dc7f4a | |
| | f97622b054 | |
| | 08d0f42725 | |
| | 74c8a25354 | |
| | a466c6db9c | |
| | 4da6c4291f | |
| | 2d242a8d09 | |
| | 6be1541e57 | |
| | b845222ce7 | |
| | 5bbdebee22 | |
| | 26100e074d | |
| | 768fa0e5a1 | |
| | 11bb8542a4 | |
| | b78c2c354b | |
| | 55e2001a0b | |
| | c093283b1b | |
| | 5449fd0352 | |
| | 3d7f7b588b | |
| | 2f067c4164 | |
| | 81266f60af | |
| | 207f36065a | |
| | eb9feabd6f | |
| | bc0868e232 | |
| | 35c1ab5e88 | |
| | 21bb6f5258 | |
| | 4914882e97 | |
| | 2302ef918a | |
@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"

@@ -19,6 +20,8 @@ import (
"github.com/sirupsen/logrus"
)

+var ErrStopAttestationStateGen = errors.New("stopped attestation state generation")
+
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())

@@ -59,7 +62,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// Try if we have already set the checkpoint cache. This will be tried again if we fail here but the check is cheap anyway.

epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()

@@ -135,6 +138,10 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}

+if features.Get().DisableAttestationStateGen {
+return nil, ErrStopAttestationStateGen
+}
+
// Fallback to state regeneration.
log.WithFields(logrus.Fields{"epoch": c.Epoch, "root": fmt.Sprintf("%#x", c.Root)}).Debug("Regenerating attestation pre-state")
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
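The hunk above gates the expensive state-regeneration fallback behind a new `DisableAttestationStateGen` feature flag and a sentinel error. The sketch below illustrates that guard pattern in isolation; the `featureFlags` struct, `regenerate` helper, and error name are stand-ins for illustration, not Prysm's actual `features` package.

```go
package main

import (
	"errors"
	"fmt"
)

// errStopStateGen mirrors the sentinel-error pattern from the hunk above.
var errStopStateGen = errors.New("stopped attestation state generation")

// featureFlags is a hypothetical stand-in for the runtime feature config.
type featureFlags struct {
	DisableAttestationStateGen bool
}

// regenerate stands in for the expensive StateGen fallback.
func regenerate() (string, error) { return "regenerated state", nil }

// attPreState returns a cached state when possible and otherwise falls back to
// regeneration, unless the flag disables that fallback entirely.
func attPreState(cached string, flags featureFlags) (string, error) {
	if cached != "" {
		return cached, nil
	}
	if flags.DisableAttestationStateGen {
		return "", errStopStateGen
	}
	return regenerate()
}

func main() {
	_, err := attPreState("", featureFlags{DisableAttestationStateGen: true})
	fmt.Println(err) // stopped attestation state generation
}
```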
@@ -134,7 +134,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}

-func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
+func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()

@@ -306,7 +306,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

-func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
+func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()

@@ -634,9 +634,7 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
return nil, nil
}

-numberOfColumns := params.BeaconConfig().NumberOfColumns
-
-if uint64(len(expected)) > numberOfColumns {
+if len(expected) > fieldparams.NumberOfColumns {
return nil, errMaxDataColumnsExceeded
}

@@ -818,10 +816,9 @@ func (s *Service) areDataColumnsAvailable(

case <-ctx.Done():
var missingIndices any = "all"
-numberOfColumns := params.BeaconConfig().NumberOfColumns
-missingIndicesCount := uint64(len(missing))
+missingIndicesCount := len(missing)

-if missingIndicesCount < numberOfColumns {
+if missingIndicesCount < fieldparams.NumberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}

@@ -948,13 +945,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
-headBlock, err := s.headBlock()
-if err != nil {
-log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
-}
-// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
-// call notifyForkchoiceUpdate, so the event is fired here.
-go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
return
}
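A recurring change in this comparison is replacing the runtime lookup `params.BeaconConfig().NumberOfColumns` with the compile-time constant `fieldparams.NumberOfColumns`, and `missingDataColumnIndices` keeps its "expected minus stored" shape around that bound check. The snippet below is a self-contained illustration of that computation with simplified types (`stored` as a plain map, the error name reused from the hunk); it is not the Prysm implementation.

```go
package main

import (
	"errors"
	"fmt"
	"sort"
)

// numberOfColumns mirrors fieldparams.NumberOfColumns (128 under current Fulu parameters).
const numberOfColumns = 128

var errMaxDataColumnsExceeded = errors.New("expected more data columns than the protocol allows")

// missingIndices returns the expected column indices that are not yet stored.
func missingIndices(expected []uint64, stored map[uint64]bool) ([]uint64, error) {
	if len(expected) > numberOfColumns {
		return nil, errMaxDataColumnsExceeded
	}
	missing := make([]uint64, 0, len(expected))
	for _, idx := range expected {
		if !stored[idx] {
			missing = append(missing, idx)
		}
	}
	sort.Slice(missing, func(i, j int) bool { return missing[i] < missing[j] })
	return missing, nil
}

func main() {
	stored := map[uint64]bool{0: true, 2: true}
	m, err := missingIndices([]uint64{0, 1, 2, 3}, stored)
	fmt.Println(m, err) // [1 3] <nil>
}
```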
@@ -2495,7 +2495,8 @@ func TestMissingBlobIndices(t *testing.T) {
}

func TestMissingDataColumnIndices(t *testing.T) {
-countPlusOne := params.BeaconConfig().NumberOfColumns + 1
+const countPlusOne = fieldparams.NumberOfColumns + 1

tooManyColumns := make(map[uint64]bool, countPlusOne)
for i := range countPlusOne {
tooManyColumns[uint64(i)] = true

@@ -2805,6 +2806,10 @@ func TestProcessLightClientUpdate(t *testing.T) {
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))

for _, testVersion := range version.All()[1:] {
+if testVersion == version.Gloas {
+// TODO(16027): Unskip light client tests for Gloas
+continue
+}
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
@@ -39,8 +39,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)

// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
-ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
-ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
+ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
+ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool

@@ -69,7 +69,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
-func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
+func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block is blacklisted

@@ -242,7 +242,7 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}

-func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
+func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {

@@ -332,7 +332,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
-func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
+func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
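Across the blockchain package, the block-receiving paths now depend on a `das.AvailabilityChecker` instead of the concrete `das.AvailabilityStore`, i.e. only on the ability to check availability. The sketch below shows the consumer-side shape of that narrowing; the interface body, the `Block` type, and `alwaysAvailable` are assumptions for illustration and not the exact Prysm definitions (the real variadic `IsDataAvailable` signature appears later in the `das` hunks).

```go
package main

import (
	"context"
	"fmt"
)

// Block is a stand-in for blocks.ROBlock.
type Block struct{ Root [32]byte }

// AvailabilityChecker is the narrow capability a block receiver actually needs:
// it only asks whether the data for a batch of blocks is available.
type AvailabilityChecker interface {
	IsDataAvailable(ctx context.Context, currentSlot uint64, blks ...Block) error
}

// alwaysAvailable is a trivial checker that reports everything as available.
type alwaysAvailable struct{}

func (alwaysAvailable) IsDataAvailable(ctx context.Context, currentSlot uint64, blks ...Block) error {
	return nil
}

// receiveBlock depends only on the checker, mirroring the signature change above;
// persistence stays an implementation detail of the concrete store.
func receiveBlock(ctx context.Context, b Block, avs AvailabilityChecker) error {
	if avs == nil {
		return nil // nothing to check
	}
	return avs.IsDataAvailable(ctx, 0, b)
}

func main() {
	fmt.Println(receiveBlock(context.Background(), Block{}, alwaysAvailable{}))
}
```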
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"

@@ -470,30 +471,35 @@ func (s *Service) removeStartupState() {
// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
-isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
+isSupernode := flags.Get().Supernode
+isSemiSupernode := flags.Get().SemiSupernode

cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement

// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
-wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
+wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
}

-// Warn the user if the node was previously subscribed to all data subnets and is not any more.
-if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
-log.Warnf(
-"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
-flags.SubscribeAllDataSubnets.Name,
-)
+// Compute the target custody group count based on current flag configuration.
+targetCustodyGroupCount := custodyRequirement
+
+// Supernode: custody all groups (either currently set or previously enabled)
+if isSupernode {
+targetCustodyGroupCount = cfg.NumberOfCustodyGroups
+}

-// Compute the custody group count.
-custodyGroupCount := custodyRequirement
-if isSubscribedToAllDataSubnets {
-custodyGroupCount = cfg.NumberOfCustodyGroups
+// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
+if isSemiSupernode {
+semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
+if err != nil {
+return 0, 0, errors.Wrap(err, "minimum custody group count")
+}
+
+targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
}

// Safely compute the fulu fork slot.

@@ -510,12 +516,23 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot,
}
}

-earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
+earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}

-return earliestAvailableSlot, custodyGroupCount, nil
+if isSupernode {
+log.WithFields(logrus.Fields{
+"current": actualCustodyGroupCount,
+"target": cfg.NumberOfCustodyGroups,
+}).Info("Supernode mode enabled. Will custody all data columns going forward.")
+}
+
+if wasSupernode && !isSupernode {
+log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
+}
+
+return earliestAvailableSlot, actualCustodyGroupCount, nil
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
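The rewritten `updateCustodyInfoInDB` derives a target custody group count from the node's flags: a supernode custodies every group, a semi-supernode custodies at least the minimum needed for reconstruction (never less than the validator-driven requirement), and everyone else custodies the base requirement; the database layer then refuses to lower a previously higher count. The sketch below reproduces only the selection step with plain values; the function name and parameters are simplified stand-ins, with `minToReconstruct` corresponding to `peerdas.MinimumCustodyGroupCountToReconstruct()`.

```go
package main

import "fmt"

// custodyTarget mirrors the flag-driven selection in updateCustodyInfoInDB.
func custodyTarget(isSupernode, isSemiSupernode bool, custodyRequirement, numberOfCustodyGroups, minToReconstruct uint64) uint64 {
	if isSupernode {
		// Supernode: custody all groups.
		return numberOfCustodyGroups
	}
	if isSemiSupernode {
		// Semi-supernode: enough to reconstruct, or more if validators require it.
		return max(custodyRequirement, minToReconstruct)
	}
	// Default: the base custody requirement.
	return custodyRequirement
}

func main() {
	// With 128 custody groups, a base requirement of 4 and a reconstruction
	// minimum of 64 (half the columns), the three modes give:
	fmt.Println(custodyTarget(true, false, 4, 128, 64))  // 128
	fmt.Println(custodyTarget(false, true, 4, 128, 64))  // 64
	fmt.Println(custodyTarget(false, false, 4, 128, 64)) // 4
}
```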
@@ -603,7 +603,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
-numberOfColumns = uint64(128)
)

params.SetupTestConfigCleanup(t)

@@ -611,7 +610,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
-cfg.NumberOfColumns = numberOfColumns
params.OverrideBeaconConfig(cfg)

ctx := t.Context()

@@ -642,7 +640,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {

resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
-gFlags.SubscribeAllDataSubnets = true
+gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

@@ -680,7 +678,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
-gFlags.SubscribeAllDataSubnets = true
+gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

@@ -695,4 +693,121 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})

t.Run("Supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)

slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)

// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)

// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})

t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)

slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups

// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)

// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
})

t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)

slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups

// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
})

t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

// Mock a high custody requirement (simulating many validators)
// We need to override the custody requirement calculation
// For this test, we'll verify the logic by checking if custodyRequirement > 64
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
// This would require a different test setup with actual validators
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
// With low validator requirements (4), should use semi-supernode minimum (64)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}
@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}

// ReceiveBlockBatch processes blocks in batches from initial-sync.
-func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
+func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
if s.State == nil {
return ErrNilState
}

@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}

// ReceiveBlock mocks ReceiveBlock method in chain service.
-func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
+func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
@@ -90,6 +90,9 @@ func IsExecutionEnabled(st state.ReadOnlyBeaconState, body interfaces.ReadOnlyBe
if st == nil || body == nil {
return false, errors.New("nil state or block body")
}
+if st.Version() >= version.Capella {
+return true, nil
+}
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
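The added early return reflects that from Capella onward every state carries an execution payload header, so `IsExecutionEnabled` only needs the merge-transition check for Bellatrix states. A minimal sketch of that gate, with fork versions as plain constants whose ordering is illustrative (not Prysm's `version` package):

```go
package main

import "fmt"

// Fork versions in ascending order; values are illustrative stand-ins.
const (
	Phase0 = iota
	Altair
	Bellatrix
	Capella
)

// executionEnabled sketches the decision: pre-Bellatrix never, Capella and
// later always, and Bellatrix only if the merge transition has completed.
func executionEnabled(stateVersion int, bellatrixMergeComplete bool) bool {
	if stateVersion >= Capella {
		return true
	}
	if stateVersion < Bellatrix {
		return false
	}
	return bellatrixMergeComplete
}

func main() {
	fmt.Println(executionEnabled(Altair, false))    // false
	fmt.Println(executionEnabled(Bellatrix, false)) // false until the merge transition
	fmt.Println(executionEnabled(Capella, false))   // true
}
```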
@@ -260,11 +260,12 @@ func Test_IsExecutionBlockCapella(t *testing.T) {

func Test_IsExecutionEnabled(t *testing.T) {
tests := []struct {
-name string
-payload *enginev1.ExecutionPayload
-header interfaces.ExecutionData
-useAltairSt bool
-want bool
+name string
+payload *enginev1.ExecutionPayload
+header interfaces.ExecutionData
+useAltairSt bool
+useCapellaSt bool
+want bool
}{
{
name: "use older than bellatrix state",

@@ -331,6 +332,17 @@ func Test_IsExecutionEnabled(t *testing.T) {
}(),
want: true,
},
+{
+name: "capella state always enabled",
+payload: emptyPayload(),
+header: func() interfaces.ExecutionData {
+h, err := emptyPayloadHeader()
+require.NoError(t, err)
+return h
+}(),
+useCapellaSt: true,
+want: true,
+},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

@@ -342,6 +354,8 @@ func Test_IsExecutionEnabled(t *testing.T) {
require.NoError(t, err)
if tt.useAltairSt {
st, _ = util.DeterministicGenesisStateAltair(t, 1)
+} else if tt.useCapellaSt {
+st, _ = util.DeterministicGenesisStateCapella(t, 1)
}
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
@@ -45,12 +45,13 @@ go_test(
"p2p_interface_test.go",
"reconstruction_helpers_test.go",
"reconstruction_test.go",
+"semi_supernode_test.go",
"utils_test.go",
"validator_test.go",
"verification_test.go",
],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
@@ -5,6 +5,7 @@ import (
"math"
"slices"

+fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"

@@ -96,8 +97,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
return nil, ErrCustodyGroupTooLarge
}

-numberOfColumns := cfg.NumberOfColumns
-
+numberOfColumns := uint64(fieldparams.NumberOfColumns)
columnsPerGroup := numberOfColumns / numberOfCustodyGroups

columns := make([]uint64, 0, columnsPerGroup)

@@ -112,8 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
+const numberOfColumns = fieldparams.NumberOfColumns

cfg := params.BeaconConfig()
-numberOfColumns := cfg.NumberOfColumns
numberOfCustodyGroups := cfg.NumberOfCustodyGroups

if columnIndex >= numberOfColumns {
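`ComputeColumnsForCustodyGroup` and its reciprocal now read the column count from `fieldparams.NumberOfColumns`. The sketch below illustrates the group-to-column arithmetic these helpers implement per the PeerDAS spec (each group owns every `numberOfCustodyGroups`-th column, and a column's group is its index modulo the group count). The constants are illustrative: mainnet Fulu uses 128 columns and 128 custody groups, while 64 groups (as in the tests above) yields two columns per group. This is a stand-alone illustration, not the Prysm code.

```go
package main

import "fmt"

const (
	numberOfColumns       = 128
	numberOfCustodyGroups = 64
)

// columnsForCustodyGroup: group g owns columns g, g+numberOfCustodyGroups, ...
func columnsForCustodyGroup(group uint64) []uint64 {
	columnsPerGroup := uint64(numberOfColumns / numberOfCustodyGroups)
	columns := make([]uint64, 0, columnsPerGroup)
	for i := uint64(0); i < columnsPerGroup; i++ {
		columns = append(columns, numberOfCustodyGroups*i+group)
	}
	return columns
}

// custodyGroupForColumn is the reciprocal mapping.
func custodyGroupForColumn(column uint64) uint64 {
	return column % numberOfCustodyGroups
}

func main() {
	fmt.Println(columnsForCustodyGroup(3)) // [3 67]
	fmt.Println(custodyGroupForColumn(67)) // 3
}
```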
@@ -30,7 +30,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
func TestComputeCustodyGroupForColumn(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
-config.NumberOfColumns = 128
config.NumberOfCustodyGroups = 64
params.OverrideBeaconConfig(config)
@@ -2,6 +2,7 @@ package peerdas

import (
"encoding/binary"
+"maps"
"sync"

"github.com/ethereum/go-ethereum/p2p/enode"

@@ -107,3 +108,102 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

return key
}

// ColumnIndices represents as a set of ColumnIndices. This could be the set of indices that a node is required to custody,
// the set that a peer custodies, missing indices for a given block, indices that are present on disk, etc.
type ColumnIndices map[uint64]struct{}

// Has returns true if the index is present in the ColumnIndices.
func (ci ColumnIndices) Has(index uint64) bool {
_, ok := ci[index]
return ok
}

// Count returns the number of indices present in the ColumnIndices.
func (ci ColumnIndices) Count() int {
return len(ci)
}

// Set sets the index in the ColumnIndices.
func (ci ColumnIndices) Set(index uint64) {
ci[index] = struct{}{}
}

// Unset removes the index from the ColumnIndices.
func (ci ColumnIndices) Unset(index uint64) {
delete(ci, index)
}

// Copy creates a copy of the ColumnIndices.
func (ci ColumnIndices) Copy() ColumnIndices {
newCi := make(ColumnIndices, len(ci))
maps.Copy(newCi, ci)
return newCi
}

// Intersection returns a new ColumnIndices that contains only the indices that are present in both ColumnIndices.
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
result := make(ColumnIndices)
for index := range ci {
if other.Has(index) {
result.Set(index)
}
}
return result
}

// Merge mutates the receiver so that any index that is set in either of
// the two ColumnIndices is set in the receiver after the function finishes.
// It does not mutate the other ColumnIndices given as a function argument.
func (ci ColumnIndices) Merge(other ColumnIndices) {
for index := range other {
ci.Set(index)
}
}

// ToMap converts a ColumnIndices into a map[uint64]struct{}.
// In the future ColumnIndices may be changed to a bit map, so using
// ToMap will ensure forwards-compatibility.
func (ci ColumnIndices) ToMap() map[uint64]struct{} {
return ci.Copy()
}

// ToSlice converts a ColumnIndices into a slice of uint64 indices.
func (ci ColumnIndices) ToSlice() []uint64 {
indices := make([]uint64, 0, len(ci))
for index := range ci {
indices = append(indices, index)
}
return indices
}

// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
ci := make(ColumnIndices, len(indices))
for _, index := range indices {
ci[index] = struct{}{}
}
return ci
}

// NewColumnIndicesFromMap creates a ColumnIndices from a map[uint64]bool. This kind of map
// is used in several places in peerdas code. Converting from this map type to ColumnIndices
// will allow us to move ColumnIndices underlying type to a bitmap in the future and avoid
// lots of loops for things like intersections/unions or copies.
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
ci := make(ColumnIndices, len(indices))
for index, set := range indices {
if !set {
continue
}
ci[index] = struct{}{}
}
return ci
}

// NewColumnIndices creates an empty ColumnIndices.
// In the future ColumnIndices may change from a reference type to a value type,
// so using this constructor will ensure forwards-compatibility.
func NewColumnIndices() ColumnIndices {
return make(ColumnIndices)
}
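The new `ColumnIndices` set type hides the `map[uint64]struct{}` representation behind constructors and set operations so the backing type can later become a bitmap. A short usage sketch, assuming a module that depends on Prysm v7; every method called below is one introduced in the hunk above.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
)

func main() {
	// Columns this node must custody vs. columns already present on disk.
	required := peerdas.NewColumnIndicesFromSlice([]uint64{1, 5, 9, 64})
	onDisk := peerdas.NewColumnIndicesFromSlice([]uint64{5, 64})

	// Intersection: which required columns are already stored.
	have := required.Intersection(onDisk)
	fmt.Println(have.Count()) // 2

	// Merge mutates the receiver, adding everything from the argument.
	merged := have.Copy()
	merged.Merge(peerdas.NewColumnIndicesFromSlice([]uint64{9}))

	// ToSlice returns indices in arbitrary map order, so sort before printing.
	indices := merged.ToSlice()
	sort.Slice(indices, func(i, j int) bool { return indices[i] < indices[j] })
	fmt.Println(indices)       // [5 9 64]
	fmt.Println(merged.Has(1)) // false
}
```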
@@ -25,3 +25,10 @@ func TestInfo(t *testing.T) {
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
}
}
+
+func TestNewColumnIndicesFromMap(t *testing.T) {
+t.Run("nil map", func(t *testing.T) {
+ci := peerdas.NewColumnIndicesFromMap(nil)
+require.Equal(t, 0, ci.Count())
+})
+}
@@ -33,8 +33,7 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// The sidecar index must be within the valid range.
-numberOfColumns := params.BeaconConfig().NumberOfColumns
-if sidecar.Index >= numberOfColumns {
+if sidecar.Index >= fieldparams.NumberOfColumns {
return ErrIndexTooLarge
}

@@ -281,8 +281,11 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
}

func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
-const blobCount = 12
-numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
+const (
+blobCount = 12
+numberOfColumns = fieldparams.NumberOfColumns
+)

err := kzg.Start()
require.NoError(b, err)
@@ -26,7 +26,40 @@ var (
func MinimumColumnCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
-return (params.BeaconConfig().NumberOfColumns + 1) / 2
+return (fieldparams.NumberOfColumns + 1) / 2
}

+// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
+// custody enough data columns for reconstruction. This accounts for the relationship between
+// custody groups and columns, making it future-proof if these values change.
+// Returns an error if the configuration values are invalid (zero or would cause division by zero).
+func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
+const numberOfColumns = fieldparams.NumberOfColumns
+cfg := params.BeaconConfig()
+
+// Validate configuration values
+if numberOfColumns == 0 {
+return 0, errors.New("NumberOfColumns cannot be zero")
+}
+if cfg.NumberOfCustodyGroups == 0 {
+return 0, errors.New("NumberOfCustodyGroups cannot be zero")
+}
+
+minimumColumnCount := MinimumColumnCountToReconstruct()
+
+// Calculate how many columns each custody group represents
+columnsPerGroup := numberOfColumns / cfg.NumberOfCustodyGroups
+
+// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
+// for reconstruction purposes as we cannot determine a meaningful custody group count
+if columnsPerGroup == 0 {
+return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
+cfg.NumberOfCustodyGroups, numberOfColumns)
+}
+
+// Use ceiling division to ensure we have enough groups to cover the minimum columns
+// ceiling(a/b) = (a + b - 1) / b
+return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
+}

// recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.

@@ -253,7 +286,8 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []

// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
-numberOfColumns := params.BeaconConfig().NumberOfColumns
+const numberOfColumns = fieldparams.NumberOfColumns

blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))

@@ -295,8 +329,6 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
-numberOfColumns := params.BeaconConfig().NumberOfColumns
-
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {

@@ -315,7 +347,7 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
return nil, nil, errors.Wrap(err, "compute cells")
}

-kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
+kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
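The new `MinimumCustodyGroupCountToReconstruct` converts the minimum column count (half the columns, rounded up) into custody groups with a ceiling division over columns-per-group. A worked sketch of that arithmetic, using the group counts exercised in the tests added later in this comparison; the helper names are local stand-ins:

```go
package main

import "fmt"

const numberOfColumns = 128 // fieldparams.NumberOfColumns

// minimumColumns is (N + 1) / 2: 64 when N = 128.
func minimumColumns() uint64 { return (numberOfColumns + 1) / 2 }

// minimumCustodyGroups applies the ceiling division from the new helper:
// ceil(minimumColumns / columnsPerGroup).
func minimumCustodyGroups(numberOfCustodyGroups uint64) uint64 {
	columnsPerGroup := uint64(numberOfColumns) / numberOfCustodyGroups
	return (minimumColumns() + columnsPerGroup - 1) / columnsPerGroup
}

func main() {
	fmt.Println(minimumCustodyGroups(128)) // 64: one column per group
	fmt.Println(minimumCustodyGroups(64))  // 32: two columns per group
	fmt.Println(minimumCustodyGroups(32))  // 16: four columns per group
}
```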
@@ -17,41 +17,9 @@ import (
)

func TestMinimumColumnsCountToReconstruct(t *testing.T) {
-testCases := []struct {
-name string
-numberOfColumns uint64
-expected uint64
-}{
-{
-name: "numberOfColumns=128",
-numberOfColumns: 128,
-expected: 64,
-},
-{
-name: "numberOfColumns=129",
-numberOfColumns: 129,
-expected: 65,
-},
-{
-name: "numberOfColumns=130",
-numberOfColumns: 130,
-expected: 65,
-},
-}
-
-for _, tc := range testCases {
-t.Run(tc.name, func(t *testing.T) {
-// Set the total number of columns.
-params.SetupTestConfigCleanup(t)
-cfg := params.BeaconConfig().Copy()
-cfg.NumberOfColumns = tc.numberOfColumns
-params.OverrideBeaconConfig(cfg)
-
-// Compute the minimum number of columns needed to reconstruct.
-actual := peerdas.MinimumColumnCountToReconstruct()
-require.Equal(t, tc.expected, actual)
-})
-}
+const expected = uint64(64)
+actual := peerdas.MinimumColumnCountToReconstruct()
+require.Equal(t, expected, actual)
}

func TestReconstructDataColumnSidecars(t *testing.T) {

@@ -200,7 +168,6 @@ func TestReconstructBlobSidecars(t *testing.T) {

t.Run("nominal", func(t *testing.T) {
const blobCount = 3
-numberOfColumns := params.BeaconConfig().NumberOfColumns

roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)

@@ -236,7 +203,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
require.NoError(t, err)

// Flatten proofs.
-cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
+cellProofs := make([][]byte, 0, blobCount*fieldparams.NumberOfColumns)
for _, proofs := range inputProofsPerBlob {
for _, proof := range proofs {
cellProofs = append(cellProofs, proof[:])

@@ -428,13 +395,12 @@ func TestReconstructBlobs(t *testing.T) {
}

func TestComputeCellsAndProofsFromFlat(t *testing.T) {
+const numberOfColumns = fieldparams.NumberOfColumns
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)

t.Run("mismatched blob and proof counts", func(t *testing.T) {
-numberOfColumns := params.BeaconConfig().NumberOfColumns
-
// Create one blob but proofs for two blobs
blobs := [][]byte{{}}

@@ -447,7 +413,6 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {

t.Run("nominal", func(t *testing.T) {
const blobCount = 2
-numberOfColumns := params.BeaconConfig().NumberOfColumns

// Generate test blobs
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
beacon-chain/core/peerdas/semi_supernode_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package peerdas

import (
"testing"

fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/ethereum/go-ethereum/p2p/enode"
)

func TestSemiSupernodeCustody(t *testing.T) {
const numberOfColumns = fieldparams.NumberOfColumns

params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = 128
params.OverrideBeaconConfig(cfg)

// Create a test node ID
nodeID := enode.ID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32})

t.Run("semi-supernode custodies exactly 64 columns", func(t *testing.T) {
// Semi-supernode uses 64 custody groups (half of 128)
const semiSupernodeCustodyGroupCount = 64

// Get custody groups for semi-supernode
custodyGroups, err := CustodyGroups(nodeID, semiSupernodeCustodyGroupCount)
require.NoError(t, err)
require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyGroups))

// Verify we get exactly 64 custody columns
custodyColumns, err := CustodyColumns(custodyGroups)
require.NoError(t, err)
require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyColumns))

// Verify the columns are valid (within 0-127 range)
for columnIndex := range custodyColumns {
if columnIndex >= numberOfColumns {
t.Fatalf("Invalid column index %d, should be less than %d", columnIndex, numberOfColumns)
}
}
})

t.Run("64 columns is exactly the minimum for reconstruction", func(t *testing.T) {
minimumCount := MinimumColumnCountToReconstruct()
require.Equal(t, uint64(64), minimumCount)
})

t.Run("semi-supernode vs supernode custody", func(t *testing.T) {
// Semi-supernode (64 custody groups)
semiSupernodeGroups, err := CustodyGroups(nodeID, 64)
require.NoError(t, err)
semiSupernodeColumns, err := CustodyColumns(semiSupernodeGroups)
require.NoError(t, err)

// Supernode (128 custody groups = all groups)
supernodeGroups, err := CustodyGroups(nodeID, 128)
require.NoError(t, err)
supernodeColumns, err := CustodyColumns(supernodeGroups)
require.NoError(t, err)

// Verify semi-supernode has exactly half the columns of supernode
require.Equal(t, 64, len(semiSupernodeColumns))
require.Equal(t, 128, len(supernodeColumns))
require.Equal(t, len(supernodeColumns)/2, len(semiSupernodeColumns))

// Verify all semi-supernode columns are a subset of supernode columns
for columnIndex := range semiSupernodeColumns {
if !supernodeColumns[columnIndex] {
t.Fatalf("Semi-supernode column %d not found in supernode columns", columnIndex)
}
}
})
}

func TestMinimumCustodyGroupCountToReconstruct(t *testing.T) {
tests := []struct {
name string
numberOfGroups uint64
expectedResult uint64
}{
{
name: "Standard 1:1 ratio (128 columns, 128 groups)",
numberOfGroups: 128,
expectedResult: 64, // Need half of 128 groups
},
{
name: "2 columns per group (128 columns, 64 groups)",
numberOfGroups: 64,
expectedResult: 32, // Need 64 columns, which is 32 groups (64/2)
},
{
name: "4 columns per group (128 columns, 32 groups)",
numberOfGroups: 32,
expectedResult: 16, // Need 64 columns, which is 16 groups (64/4)
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = tt.numberOfGroups
params.OverrideBeaconConfig(cfg)

result, err := MinimumCustodyGroupCountToReconstruct()
require.NoError(t, err)
require.Equal(t, tt.expectedResult, result)
})
}
}

func TestMinimumCustodyGroupCountToReconstruct_ErrorCases(t *testing.T) {
t.Run("Returns error when NumberOfCustodyGroups is zero", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = 0
params.OverrideBeaconConfig(cfg)

_, err := MinimumCustodyGroupCountToReconstruct()
require.NotNil(t, err)
require.Equal(t, true, err.Error() == "NumberOfCustodyGroups cannot be zero")
})

t.Run("Returns error when NumberOfCustodyGroups exceeds NumberOfColumns", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.NumberOfCustodyGroups = 256
params.OverrideBeaconConfig(cfg)

_, err := MinimumCustodyGroupCountToReconstruct()
require.NotNil(t, err)
// Just check that we got an error about the configuration
require.Equal(t, true, len(err.Error()) > 0)
})
}
@@ -102,11 +102,13 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
+const numberOfColumns = uint64(fieldparams.NumberOfColumns)
+
if len(cellsPerBlob) == 0 {
return nil, nil
}
start := time.Now()
-cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, params.BeaconConfig().NumberOfColumns)
+cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}

@@ -115,9 +117,8 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return nil, errors.Wrap(err, "extract block info")
}

-maxIdx := params.BeaconConfig().NumberOfColumns
-roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
-for idx := range maxIdx {
+roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
+for idx := range numberOfColumns {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
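`DataColumnSidecars` builds one sidecar per column by rotating per-blob rows of cells into per-column groupings (`rotateRowsToCols` in the hunk above). The transpose below is an illustrative stand-alone version of that reshaping using strings instead of KZG cells; it is not Prysm's implementation.

```go
package main

import "fmt"

// rotateRowsToCols transposes a [blob][column] matrix into [column][blob] so
// that every data column sidecar can carry one cell from each blob.
func rotateRowsToCols(rows [][]string, numberOfColumns int) ([][]string, error) {
	cols := make([][]string, numberOfColumns)
	for c := 0; c < numberOfColumns; c++ {
		cols[c] = make([]string, 0, len(rows))
	}
	for r, row := range rows {
		if len(row) != numberOfColumns {
			return nil, fmt.Errorf("blob %d has %d cells, want %d", r, len(row), numberOfColumns)
		}
		for c, cell := range row {
			cols[c] = append(cols[c], cell)
		}
	}
	return cols, nil
}

func main() {
	// Two blobs, three columns (real Fulu blocks use 128 columns per blob).
	rows := [][]string{
		{"b0c0", "b0c1", "b0c2"},
		{"b1c0", "b1c1", "b1c2"},
	}
	cols, _ := rotateRowsToCols(rows, 3)
	fmt.Println(cols) // [[b0c0 b1c0] [b0c1 b1c1] [b0c2 b1c2]]
}
```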
@@ -6,7 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
-"github.com/OffchainLabs/prysm/v7/config/params"
+fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"

@@ -59,6 +59,8 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
}

func TestDataColumnSidecars(t *testing.T) {
+const numberOfColumns = fieldparams.NumberOfColumns
+
t.Run("sizes mismatch", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()

@@ -69,10 +71,10 @@ func TestDataColumnSidecars(t *testing.T) {

// Create cells and proofs.
cellsPerBlob := [][]kzg.Cell{
-make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
+make([]kzg.Cell, numberOfColumns),
}
proofsPerBlob := [][]kzg.Proof{
-make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
+make([]kzg.Proof, numberOfColumns),
}

rob, err := blocks.NewROBlock(signedBeaconBlock)

@@ -117,7 +119,6 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)

// Create cells and proofs with sufficient cells but insufficient proofs.
-numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
}

@@ -149,7 +150,6 @@ func TestDataColumnSidecars(t *testing.T) {
require.NoError(t, err)

// Create cells and proofs with correct dimensions.
-numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),

@@ -197,6 +197,7 @@ func TestDataColumnSidecars(t *testing.T) {
}

func TestReconstructionSource(t *testing.T) {
+const numberOfColumns = fieldparams.NumberOfColumns
// Create a Fulu block with blob commitments.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
commitment1 := make([]byte, 48)

@@ -212,7 +213,6 @@ func TestReconstructionSource(t *testing.T) {
require.NoError(t, err)

// Create cells and proofs with correct dimensions.
-numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsPerBlob := [][]kzg.Cell{
make([]kzg.Cell, numberOfColumns),
make([]kzg.Cell, numberOfColumns),
@@ -4,14 +4,19 @@ go_library(
name = "go_default_library",
srcs = [
"availability_blobs.go",
"availability_columns.go",
"bisect.go",
"blob_cache.go",
"data_column_cache.go",
"iface.go",
"log.go",
"mock.go",
"needs.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",

@@ -21,6 +26,7 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
+"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

@@ -30,11 +36,14 @@ go_test(
name = "go_default_test",
srcs = [
"availability_blobs_test.go",
"availability_columns_test.go",
"blob_cache_test.go",
"data_column_cache_test.go",
"needs_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",

@@ -45,6 +54,7 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
+"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -11,9 +11,8 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/runtime/version"
-"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
-log "github.com/sirupsen/logrus"
+"github.com/sirupsen/logrus"
)

var (

@@ -24,12 +23,13 @@ var (
// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreBlob struct {
-store *filesystem.BlobStorage
-cache *blobCache
-verifier BlobBatchVerifier
+store *filesystem.BlobStorage
+cache *blobCache
+verifier BlobBatchVerifier
+shouldRetain RetentionChecker
}

-var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
+var _ AvailabilityChecker = &LazilyPersistentStoreBlob{}

// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications

@@ -42,11 +42,12 @@ type BlobBatchVerifier interface {

// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used
// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood.
-func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStoreBlob {
+func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier, shouldRetain RetentionChecker) *LazilyPersistentStoreBlob {
return &LazilyPersistentStoreBlob{
-store: store,
-cache: newBlobCache(),
-verifier: verifier,
+store: store,
+cache: newBlobCache(),
+verifier: verifier,
+shouldRetain: shouldRetain,
}
}

@@ -66,9 +67,6 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..
}
}
}
-if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
-return nil
-}
key := keyFromSidecar(sidecars[0])
entry := s.cache.ensure(key)
for _, blobSidecar := range sidecars {

@@ -81,8 +79,17 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
-func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
-blockCommitments, err := commitmentsToCheck(b, current)
+func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
+for _, b := range blks {
+if err := s.checkOne(ctx, current, b); err != nil {
+return err
+}
+}
+return nil
+}
+
+func (s *LazilyPersistentStoreBlob) checkOne(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
+blockCommitments, err := commitmentsToCheck(b, s.shouldRetain)
if err != nil {
return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
}

@@ -100,7 +107,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
-sidecars, err := entry.filter(root, blockCommitments, b.Block().Slot())
+sidecars, err := entry.filter(root, blockCommitments)
if err != nil {
return errors.Wrap(err, "incomplete BlobSidecar batch")
}

@@ -112,7 +119,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
ok := errors.As(err, &me)
if ok {
fails := me.Failures()
-lf := make(log.Fields, len(fails))
+lf := make(logrus.Fields, len(fails))
for i := range fails {
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
}

@@ -131,13 +138,12 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
return nil
}

-func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) ([][]byte, error) {
+func commitmentsToCheck(b blocks.ROBlock, shouldRetain RetentionChecker) ([][]byte, error) {
if b.Version() < version.Deneb {
return nil, nil
}

// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
-if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
+if !shouldRetain(b.Block().Slot()) {
return nil, nil
}
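The blob store now receives a `RetentionChecker` at construction and `IsDataAvailable` accepts a batch of blocks, replacing the inline `WithinDAPeriod` call. The sketch below illustrates the injected-predicate idea with the DA window simplified to epoch arithmetic; all names and the 4096-epoch figure (MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST) are used illustratively, and this is not the Prysm implementation.

```go
package main

import "fmt"

type Slot uint64
type Epoch uint64

const slotsPerEpoch = 32

// RetentionChecker reports whether sidecars for a block slot still have to be
// checked for availability; the name matches the interface added in this diff.
type RetentionChecker func(Slot) bool

// daPeriodChecker builds a checker that retains anything within minEpochs of
// the current slot, mirroring the old WithinDAPeriod logic in spirit.
func daPeriodChecker(current Slot, minEpochs Epoch) RetentionChecker {
	return func(s Slot) bool {
		currentEpoch := Epoch(current / slotsPerEpoch)
		blockEpoch := Epoch(s / slotsPerEpoch)
		return blockEpoch+minEpochs >= currentEpoch
	}
}

func main() {
	shouldRetain := daPeriodChecker(Slot(5000*slotsPerEpoch), 4096)
	fmt.Println(shouldRetain(Slot(1000 * slotsPerEpoch))) // true: within the window
	fmt.Println(shouldRetain(Slot(100 * slotsPerEpoch)))  // false: outside the window
}
```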
@@ -17,6 +17,10 @@ import (
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func testShouldRetainAlways(s primitives.Slot) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func Test_commitmentsToCheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
@@ -30,11 +34,12 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
shouldRetain RetentionChecker
|
||||
}{
|
||||
{
|
||||
name: "pre deneb",
|
||||
@@ -60,6 +65,7 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
shouldRetain: testShouldRetainAlways,
|
||||
commits: func() [][]byte {
|
||||
mb := params.GetNetworkScheduleEntry(slots.ToEpoch(fulu + 100)).MaxBlobsPerBlock
|
||||
return commits[:mb]
|
||||
@@ -79,7 +85,8 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: fulu + windowSlots + 1,
|
||||
shouldRetain: func(s primitives.Slot) bool { return false },
|
||||
slot: fulu + windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "excessive commitments",
|
||||
@@ -97,14 +104,15 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.Equal(t, true, len(c) > params.BeaconConfig().MaxBlobsPerBlock(sb.Block().Slot()))
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
err: errIndexOutOfBounds,
|
||||
shouldRetain: testShouldRetainAlways,
|
||||
slot: windowSlots + 1,
|
||||
err: errIndexOutOfBounds,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
b := c.block(t)
|
||||
co, err := commitmentsToCheck(b, c.slot)
|
||||
co, err := commitmentsToCheck(b, c.shouldRetain)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
@@ -126,7 +134,7 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(ds, blobSidecars[2]))
|
||||
@@ -153,7 +161,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
|
||||
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(ds, blobSidecars[0]))
|
||||
@@ -166,11 +174,11 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)
|
||||
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{}, testShouldRetainAlways)
|
||||
// stashes as expected
|
||||
require.NoError(t, as.Persist(ds, blobSidecars...))
|
||||
// ignores duplicates
|
||||
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)
|
||||
require.ErrorIs(t, as.Persist(ds, blobSidecars...), errDuplicateSidecar)
|
||||
|
||||
// ignores index out of bound
|
||||
blobSidecars[0].Index = 6
|
||||
@@ -183,7 +191,7 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))
|
||||
|
||||
// doesn't ignore new sidecars with a different block root
|
||||
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
|
||||
require.NoError(t, as.Persist(ds, moreBlobSidecars[1:]...))
|
||||
}
|
||||
|
||||
type mockBlobBatchVerifier struct {

244 beacon-chain/das/availability_columns.go Normal file
@@ -0,0 +1,244 @@
package das

import (
	"context"
	"io"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store                  *filesystem.DataColumnStorage
	cache                  *dataColumnCache
	newDataColumnsVerifier verification.NewDataColumnsVerifier
	custody                *custodyRequirement
	bisector               Bisector
	shouldRetain           RetentionChecker
}

var _ AvailabilityChecker = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
	VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
	store *filesystem.DataColumnStorage,
	newDataColumnsVerifier verification.NewDataColumnsVerifier,
	nodeID enode.ID,
	cgc uint64,
	bisector Bisector,
	shouldRetain RetentionChecker,
) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store:                  store,
		cache:                  newDataColumnCache(),
		newDataColumnsVerifier: newDataColumnsVerifier,
		custody:                &custodyRequirement{nodeID: nodeID, cgc: cgc},
		bisector:               bisector,
		shouldRetain:           shouldRetain,
	}
}

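A hedged caller-side sketch (not part of this new file; the function and variable names below are assumptions, and it is assumed to live in the das package alongside the types above) showing how the constructor and the two AvailabilityChecker methods defined in this file fit together:

// importWithColumns strings together the exported calls shown in this file.
func importWithColumns(
	ctx context.Context,
	avs *LazilyPersistentStoreColumn,
	current primitives.Slot,
	blk blocks.ROBlock,
	sidecars []blocks.RODataColumn,
) error {
	// Persist only stashes sidecars in the in-memory cache; nothing is written to disk yet.
	if err := avs.Persist(current, sidecars...); err != nil {
		return err
	}
	// IsDataAvailable verifies and saves the custodied columns referenced by the block,
	// returning nil only once the block's data is fully available.
	return avs.IsDataAvailable(ctx, current, blk)
}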
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) Persist(_ primitives.Slot, sidecars ...blocks.RODataColumn) error {
|
||||
for _, sidecar := range sidecars {
|
||||
if err := s.cache.stash(sidecar); err != nil {
|
||||
return errors.Wrap(err, "stash DataColumnSidecar")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, _ primitives.Slot, blks ...blocks.ROBlock) error {
|
||||
toVerify := make([]blocks.RODataColumn, 0)
|
||||
for _, block := range blks {
|
||||
indices, err := s.required(block)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x`", block.Root())
|
||||
}
|
||||
if indices.Count() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := keyFromBlock(block)
|
||||
entry := s.cache.entry(key)
|
||||
toVerify, err = entry.append(toVerify, IndicesNotStored(s.store.Summary(block.Root()), indices))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "entry filter")
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.verifyAndSave(toVerify); err != nil {
|
||||
log.Warn("Batch verification failed, bisecting columns by peer")
|
||||
if err := s.bisectVerification(toVerify); err != nil {
|
||||
return errors.Wrap(err, "bisect verification")
|
||||
}
|
||||
}
|
||||
|
||||
s.cache.cleanup(blks)
|
||||
return nil
|
||||
}
|
||||
|
||||
// required returns the set of column indices to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) required(block blocks.ROBlock) (peerdas.ColumnIndices, error) {
|
||||
if !s.shouldRetain(block.Block().Slot()) {
|
||||
return peerdas.NewColumnIndices(), nil
|
||||
}
|
||||
|
||||
// If there are any commitments in the block, there are blobs,
|
||||
// and if there are blobs, we need the columns bisecting those blobs.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
// No DA check needed if the block has no blobs.
|
||||
if len(commitments) == 0 {
|
||||
return peerdas.NewColumnIndices(), nil
|
||||
}
|
||||
|
||||
return s.custody.required()
|
||||
}
|
||||
|
||||
// verifyAndSave calls Save on the column store if the columns pass verification.
|
||||
func (s *LazilyPersistentStoreColumn) verifyAndSave(columns []blocks.RODataColumn) error {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
verified, err := s.verifyColumns(columns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verify columns")
|
||||
}
|
||||
if err := s.store.Save(verified); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStoreColumn) verifyColumns(columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(columns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
verifier := s.newDataColumnsVerifier(columns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return nil, errors.Wrap(err, "valid fields")
|
||||
}
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
return verifier.VerifiedRODataColumns()
|
||||
}
|
||||
|
||||
// bisectVerification is used when verification of a batch of columns fails. Since the batch could
|
||||
// span multiple blocks or have been fetched from multiple peers, this pattern enables code using the
|
||||
// store to break the verification into smaller units and learn the results, in order to plan to retry
|
||||
// retrieval of the unusable columns.
|
||||
func (s *LazilyPersistentStoreColumn) bisectVerification(columns []blocks.RODataColumn) error {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.bisector == nil {
|
||||
return errors.New("bisector not initialized")
|
||||
}
|
||||
iter, err := s.bisector.Bisect(columns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Bisector.Bisect")
|
||||
}
|
||||
// It's up to the bisector how to chunk up columns for verification,
|
||||
// which could be by block, or by peer, or any other strategy.
|
||||
// For the purposes of range syncing or backfill this will be by peer,
|
||||
// so that the node can learn which peer is giving us bad data and downscore them.
|
||||
for columns, err := iter.Next(); columns != nil; columns, err = iter.Next() {
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return errors.Wrap(err, "Bisector.Next")
|
||||
}
|
||||
break // io.EOF signals end of iteration
|
||||
}
|
||||
// We save the parts of the batch that have been verified successfully even though we don't know
|
||||
// if all columns for the block will be available until the block is imported.
|
||||
if err := s.verifyAndSave(s.columnsNotStored(columns)); err != nil {
|
||||
iter.OnError(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// This should give us a single error representing any unresolved errors seen via onError.
|
||||
return iter.Error()
|
||||
}

// columnsNotStored filters the list of ROColumnSidecars to only include those that are not found in the storage summary.
func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataColumn) []blocks.RODataColumn {
	// We use this method to filter a set of sidecars that were previously seen to be unavailable on disk. So our base assumption
	// is that they are still not stored and we don't need to copy the list. Instead we record any indices that are unexpectedly
	// stored, and only when we find that the storage view has changed do we need to create a new slice.
	stored := make(map[int]struct{}, 0)
	lastRoot := [32]byte{}
	var sum filesystem.DataColumnStorageSummary
	for i, sc := range sidecars {
		if sc.BlockRoot() != lastRoot {
			sum = s.store.Summary(sc.BlockRoot())
			lastRoot = sc.BlockRoot()
		}
		if sum.HasIndex(sc.Index) {
			stored[i] = struct{}{}
		}
	}
	// If the view on storage hasn't changed, return the original list.
	if len(stored) == 0 {
		return sidecars
	}
	shift := 0
	for i := range sidecars {
		if _, ok := stored[i]; ok {
			// If the index is stored, skip and overwrite it.
			// Track how many spaces down to shift unseen sidecars (to overwrite the previously shifted or seen).
			shift++
			continue
		}
		if shift > 0 {
			// If the index is not stored and we have seen stored indices,
			// we need to shift the current index down.
			sidecars[i-shift] = sidecars[i]
		}
	}
	return sidecars[:len(sidecars)-shift]
}
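The second loop above is a standard in-place compaction. A toy sketch of the same shift-down pattern on a plain int slice (purely illustrative, not part of this file):

// compactSketch drops the positions flagged in remove from xs without allocating,
// mirroring the shift logic in columnsNotStored.
func compactSketch(xs []int, remove map[int]struct{}) []int {
	shift := 0
	for i := range xs {
		if _, ok := remove[i]; ok {
			shift++
			continue
		}
		if shift > 0 {
			xs[i-shift] = xs[i]
		}
	}
	return xs[:len(xs)-shift]
}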
|
||||
type custodyRequirement struct {
|
||||
nodeID enode.ID
|
||||
cgc uint64 // custody group count
|
||||
indices peerdas.ColumnIndices
|
||||
}
|
||||
|
||||
func (c *custodyRequirement) required() (peerdas.ColumnIndices, error) {
|
||||
peerInfo, _, err := peerdas.Info(c.nodeID, c.cgc)
|
||||
if err != nil {
|
||||
return peerdas.NewColumnIndices(), errors.Wrap(err, "peer info")
|
||||
}
|
||||
return peerdas.NewColumnIndicesFromMap(peerInfo.CustodyColumns), nil
|
||||
}

908 beacon-chain/das/availability_columns_test.go Normal file
@@ -0,0 +1,908 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func mockShouldRetain(current primitives.Epoch) RetentionChecker {
|
||||
return func(slot primitives.Slot) bool {
|
||||
return params.WithinDAPeriod(slots.ToEpoch(slot), current)
|
||||
}
|
||||
}
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil, mockShouldRetain(0))
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
var current primitives.Slot = 1_000_000
|
||||
sr := mockShouldRetain(slots.ToEpoch(current))
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil, sr)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(current, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(roSidecars), len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
store := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
avs := NewLazilyPersistentStoreColumn(store, nil, enode.ID{}, 0, nil, mockShouldRetain(slots.ToEpoch(slot)))
|
||||
|
||||
err := avs.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(avs.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := avs.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
summary := store.Summary(key.root)
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
require.Equal(t, len(roSidecars), len(entry.scs))
|
||||
|
||||
idx1 := entry.scs[1]
|
||||
require.NotNil(t, idx1)
|
||||
require.DeepSSZEqual(t, roDataColumns[0].BlockRoot(), idx1.BlockRoot())
|
||||
idx5 := entry.scs[5]
|
||||
require.NotNil(t, idx5)
|
||||
require.DeepSSZEqual(t, roDataColumns[1].BlockRoot(), idx5.BlockRoot())
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[uint64]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, newDataColumnsVerifier, enode.ID{}, 0, nil, mockShouldRetain(0))
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Slot = primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
indices := []uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
avs := NewLazilyPersistentStoreColumn(storage, newDataColumnsVerifier, enode.ID{}, uint64(len(indices)), nil, mockShouldRetain(slots.ToEpoch(slot)))
|
||||
dcparams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dcparams = append(dcparams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dcparams)
|
||||
|
||||
key := keyFromBlock(signedRoBlock)
|
||||
entry := avs.cache.entry(key)
|
||||
defer avs.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = avs.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := storage.Get(root, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
//summary := storage.Summary(root)
|
||||
require.Equal(t, len(verifiedRoDataColumns), len(actual))
|
||||
//require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
//require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRetentionWindow(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
fuluSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
numberOfColumns := fieldparams.NumberOfColumns
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
wantedCols int
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: fuluSlot + windowSlots,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = fuluSlot + windowSlots - 1
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: fuluSlot + windowSlots,
|
||||
wantedCols: numberOfColumns,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, nil, enode.ID{}, uint64(numberOfColumns), nil, mockShouldRetain(slots.ToEpoch(tc.slot)))
|
||||
|
||||
indices, err := s.required(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.wantedCols, len(indices))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock any) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
|
||||
// Mock implementations for bisectVerification tests
|
||||
|
||||
// mockBisectionIterator simulates a BisectionIterator for testing.
|
||||
type mockBisectionIterator struct {
|
||||
chunks [][]blocks.RODataColumn
|
||||
chunkErrors []error
|
||||
finalError error
|
||||
chunkIndex int
|
||||
nextCallCount int
|
||||
onErrorCallCount int
|
||||
onErrorErrors []error
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) Next() ([]blocks.RODataColumn, error) {
|
||||
if m.chunkIndex >= len(m.chunks) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
chunk := m.chunks[m.chunkIndex]
|
||||
var err error
|
||||
if m.chunkIndex < len(m.chunkErrors) {
|
||||
err = m.chunkErrors[m.chunkIndex]
|
||||
}
|
||||
m.chunkIndex++
|
||||
m.nextCallCount++
|
||||
if err != nil {
|
||||
return chunk, err
|
||||
}
|
||||
return chunk, nil
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) OnError(err error) {
|
||||
m.onErrorCallCount++
|
||||
m.onErrorErrors = append(m.onErrorErrors, err)
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) Error() error {
|
||||
return m.finalError
|
||||
}
|
||||
|
||||
// mockBisector simulates a Bisector for testing.
|
||||
type mockBisector struct {
|
||||
shouldError bool
|
||||
bisectErr error
|
||||
iterator *mockBisectionIterator
|
||||
}
|
||||
|
||||
func (m *mockBisector) Bisect(columns []blocks.RODataColumn) (BisectionIterator, error) {
|
||||
if m.shouldError {
|
||||
return nil, m.bisectErr
|
||||
}
|
||||
return m.iterator, nil
|
||||
}
|
||||
|
||||
// testDataColumnsVerifier implements verification.DataColumnsVerifier for testing.
|
||||
type testDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
shouldFail bool
|
||||
columns []blocks.RODataColumn
|
||||
}
|
||||
|
||||
func (v *testDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
verified := make([]blocks.VerifiedRODataColumn, len(v.columns))
|
||||
for i, col := range v.columns {
|
||||
verified[i] = blocks.NewVerifiedRODataColumn(col)
|
||||
}
|
||||
return verified, nil
|
||||
}
|
||||
|
||||
func (v *testDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
func (v *testDataColumnsVerifier) ValidFields() error {
|
||||
if v.shouldFail {
|
||||
return errors.New("verification failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) CorrectSubnet(string, []string) error { return nil }
|
||||
func (v *testDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (v *testDataColumnsVerifier) ValidProposerSignature(context.Context) error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarParentSeen(func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) SidecarParentValid(func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarInclusionProven() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarKzgProofVerified() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarProposerExpected(context.Context) error { return nil }
|
||||
|
||||
// Helper function to create test data columns
|
||||
func makeTestDataColumns(t *testing.T, count int, blockRoot [32]byte, startIndex uint64) []blocks.RODataColumn {
|
||||
columns := make([]blocks.RODataColumn, 0, count)
|
||||
for i := range count {
|
||||
params := util.DataColumnParam{
|
||||
Index: startIndex + uint64(i),
|
||||
KzgCommitments: commitments,
|
||||
Slot: primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
}
|
||||
_, verifiedCols := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{params})
|
||||
if len(verifiedCols) > 0 {
|
||||
columns = append(columns, verifiedCols[0].RODataColumn)
|
||||
}
|
||||
}
|
||||
return columns
|
||||
}
|
||||
|
||||
// Helper function to create test verifier factory with failure pattern
|
||||
func makeTestVerifierFactory(failurePattern []bool) verification.NewDataColumnsVerifier {
|
||||
callIndex := 0
|
||||
return func(cols []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
shouldFail := callIndex < len(failurePattern) && failurePattern[callIndex]
|
||||
callIndex++
|
||||
return &testDataColumnsVerifier{
|
||||
shouldFail: shouldFail,
|
||||
columns: cols,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBisectVerification tests the bisectVerification method with comprehensive table-driven test cases.
|
||||
func TestBisectVerification(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
|
||||
cases := []struct {
|
||||
expectedError bool
|
||||
bisectorNil bool
|
||||
expectedOnErrorCallCount int
|
||||
expectedNextCallCount int
|
||||
inputCount int
|
||||
iteratorFinalError error
|
||||
bisectorError error
|
||||
name string
|
||||
storedColumnIndices []uint64
|
||||
verificationFailurePattern []bool
|
||||
chunkErrors []error
|
||||
chunks [][]blocks.RODataColumn
|
||||
}{
|
||||
{
|
||||
name: "EmptyColumns",
|
||||
inputCount: 0,
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "NilBisector",
|
||||
inputCount: 3,
|
||||
bisectorNil: true,
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "BisectError",
|
||||
inputCount: 5,
|
||||
bisectorError: errors.New("bisect failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleChunkSuccess",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleChunkFails",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{true},
|
||||
iteratorFinalError: errors.New("chunk failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_BothPass",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_FirstFails",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, false},
|
||||
iteratorFinalError: errors.New("first failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_SecondFails",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{false, true},
|
||||
iteratorFinalError: errors.New("second failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_BothFail",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, true},
|
||||
iteratorFinalError: errors.New("both failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 2,
|
||||
},
|
||||
{
|
||||
name: "ManyChunks_AllPass",
|
||||
inputCount: 16,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, false, false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 5,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ManyChunks_MixedFail",
|
||||
inputCount: 16,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, true, false, true},
|
||||
iteratorFinalError: errors.New("mixed failures"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 5,
|
||||
expectedOnErrorCallCount: 2,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_PartialFilter",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{1, 3},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_AllStored",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{0, 1, 2, 3, 4, 5},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_MixedAccess",
|
||||
inputCount: 10,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{1, 5, 9},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "IteratorNextError",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
chunkErrors: []error{nil, errors.New("next error")},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "IteratorNextEOF",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "LargeChunkSize",
|
||||
inputCount: 128,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ManySmallChunks",
|
||||
inputCount: 32,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}, {}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, false, false, false, false, false, false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 9,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ChunkWithSomeStoredColumns",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{0, 2, 4},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "OnErrorDoesNotStopIteration",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, false},
|
||||
iteratorFinalError: errors.New("first failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "VerificationErrorWrapping",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{true},
|
||||
iteratorFinalError: errors.New("verification failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Setup storage
|
||||
var store *filesystem.DataColumnStorage
|
||||
if len(tc.storedColumnIndices) > 0 {
|
||||
mocker, s := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
slot := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, mocker.CreateFakeIndices(blockRoot, slot, tc.storedColumnIndices...))
|
||||
store = s
|
||||
} else {
|
||||
store = filesystem.NewEphemeralDataColumnStorage(t)
|
||||
}
|
||||
|
||||
// Create test columns
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
columns := makeTestDataColumns(t, tc.inputCount, blockRoot, 0)
|
||||
|
||||
// Setup iterator with chunks
|
||||
iterator := &mockBisectionIterator{
|
||||
chunks: tc.chunks,
|
||||
chunkErrors: tc.chunkErrors,
|
||||
finalError: tc.iteratorFinalError,
|
||||
}
|
||||
|
||||
// Setup bisector
|
||||
var bisector Bisector
|
||||
if tc.bisectorNil || tc.inputCount == 0 {
|
||||
bisector = nil
|
||||
} else if tc.bisectorError != nil {
|
||||
bisector = &mockBisector{
|
||||
shouldError: true,
|
||||
bisectErr: tc.bisectorError,
|
||||
}
|
||||
} else {
|
||||
bisector = &mockBisector{
|
||||
shouldError: false,
|
||||
iterator: iterator,
|
||||
}
|
||||
}
|
||||
|
||||
// Create store with verifier
|
||||
verifierFactory := makeTestVerifierFactory(tc.verificationFailurePattern)
|
||||
lazilyPersistentStore := &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newDataColumnCache(),
|
||||
newDataColumnsVerifier: verifierFactory,
|
||||
custody: &custodyRequirement{},
|
||||
bisector: bisector,
|
||||
}
|
||||
|
||||
// Execute
|
||||
err := lazilyPersistentStore.bisectVerification(columns)
|
||||
|
||||
// Assert
|
||||
if tc.expectedError {
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify iterator interactions for non-error cases
|
||||
if tc.inputCount > 0 && bisector != nil && tc.bisectorError == nil && !tc.expectedError {
|
||||
require.NotEqual(t, 0, iterator.nextCallCount, "iterator Next() should have been called")
|
||||
require.Equal(t, tc.expectedOnErrorCallCount, iterator.onErrorCallCount, "OnError() call count mismatch")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func allIndicesExcept(total int, excluded []uint64) []uint64 {
|
||||
excludeMap := make(map[uint64]bool)
|
||||
for _, idx := range excluded {
|
||||
excludeMap[idx] = true
|
||||
}
|
||||
|
||||
var result []uint64
|
||||
for i := range total {
|
||||
if !excludeMap[uint64(i)] {
|
||||
result = append(result, uint64(i))
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// TestColumnsNotStored tests the columnsNotStored method.
|
||||
func TestColumnsNotStored(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
count int
|
||||
stored []uint64 // Column indices marked as stored
|
||||
expected []uint64 // Expected column indices in returned result
|
||||
}{
|
||||
// Empty cases
|
||||
{
|
||||
name: "EmptyInput",
|
||||
count: 0,
|
||||
stored: []uint64{},
|
||||
expected: []uint64{},
|
||||
},
|
||||
// Single element cases
|
||||
{
|
||||
name: "SingleElement_NotStored",
|
||||
count: 1,
|
||||
stored: []uint64{},
|
||||
expected: []uint64{0},
|
||||
},
|
||||
{
|
||||
name: "SingleElement_Stored",
|
||||
count: 1,
|
||||
stored: []uint64{0},
|
||||
expected: []uint64{},
|
||||
},
|
||||
// All not stored cases
|
||||
{
|
||||
name: "AllNotStored_FiveElements",
|
||||
count: 5,
|
||||
stored: []uint64{},
|
||||
expected: []uint64{0, 1, 2, 3, 4},
|
||||
},
|
||||
// All stored cases
|
||||
{
|
||||
name: "AllStored",
|
||||
count: 5,
|
||||
stored: []uint64{0, 1, 2, 3, 4},
|
||||
expected: []uint64{},
|
||||
},
|
||||
// Partial storage - beginning
|
||||
{
|
||||
name: "StoredAtBeginning",
|
||||
count: 5,
|
||||
stored: []uint64{0, 1},
|
||||
expected: []uint64{2, 3, 4},
|
||||
},
|
||||
// Partial storage - end
|
||||
{
|
||||
name: "StoredAtEnd",
|
||||
count: 5,
|
||||
stored: []uint64{3, 4},
|
||||
expected: []uint64{0, 1, 2},
|
||||
},
|
||||
// Partial storage - middle
|
||||
{
|
||||
name: "StoredInMiddle",
|
||||
count: 5,
|
||||
stored: []uint64{2},
|
||||
expected: []uint64{0, 1, 3, 4},
|
||||
},
|
||||
// Partial storage - scattered
|
||||
{
|
||||
name: "StoredScattered",
|
||||
count: 8,
|
||||
stored: []uint64{1, 3, 5},
|
||||
expected: []uint64{0, 2, 4, 6, 7},
|
||||
},
|
||||
// Alternating pattern
|
||||
{
|
||||
name: "AlternatingPattern",
|
||||
count: 8,
|
||||
stored: []uint64{0, 2, 4, 6},
|
||||
expected: []uint64{1, 3, 5, 7},
|
||||
},
|
||||
// Consecutive stored
|
||||
{
|
||||
name: "ConsecutiveStored",
|
||||
count: 10,
|
||||
stored: []uint64{3, 4, 5, 6},
|
||||
expected: []uint64{0, 1, 2, 7, 8, 9},
|
||||
},
|
||||
// Large slice cases
|
||||
{
|
||||
name: "LargeSlice_NoStored",
|
||||
count: 64,
|
||||
stored: []uint64{},
|
||||
expected: allIndicesExcept(64, []uint64{}),
|
||||
},
|
||||
{
|
||||
name: "LargeSlice_SingleStored",
|
||||
count: 64,
|
||||
stored: []uint64{32},
|
||||
expected: allIndicesExcept(64, []uint64{32}),
|
||||
},
|
||||
}
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create test columns first to get the actual block root
|
||||
var columns []blocks.RODataColumn
|
||||
if tc.count > 0 {
|
||||
columns = makeTestDataColumns(t, tc.count, [32]byte{}, 0)
|
||||
}
|
||||
|
||||
// Get the actual block root from the first column (if any)
|
||||
var blockRoot [32]byte
|
||||
if len(columns) > 0 {
|
||||
blockRoot = columns[0].BlockRoot()
|
||||
}
|
||||
|
||||
// Setup storage
|
||||
var store *filesystem.DataColumnStorage
|
||||
if len(tc.stored) > 0 {
|
||||
mocker, s := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
|
||||
require.NoError(t, mocker.CreateFakeIndices(blockRoot, slot, tc.stored...))
|
||||
store = s
|
||||
} else {
|
||||
store = filesystem.NewEphemeralDataColumnStorage(t)
|
||||
}
|
||||
|
||||
// Create store instance
|
||||
lazilyPersistentStore := &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
}
|
||||
|
||||
// Execute
|
||||
result := lazilyPersistentStore.columnsNotStored(columns)
|
||||
|
||||
// Assert count
|
||||
require.Equal(t, len(tc.expected), len(result),
|
||||
fmt.Sprintf("expected %d columns, got %d", len(tc.expected), len(result)))
|
||||
|
||||
// Verify that no stored columns are in the result
|
||||
if len(tc.stored) > 0 {
|
||||
resultIndices := make(map[uint64]bool)
|
||||
for _, col := range result {
|
||||
resultIndices[col.Index] = true
|
||||
}
|
||||
for _, storedIdx := range tc.stored {
|
||||
require.Equal(t, false, resultIndices[storedIdx],
|
||||
fmt.Sprintf("stored column index %d should not be in result", storedIdx))
|
||||
}
|
||||
}
|
||||
|
||||
// If expectedIndices is specified, verify the exact column indices in order
|
||||
if len(tc.expected) > 0 && len(tc.stored) == 0 {
|
||||
// Only check exact order for non-stored cases (where we know they stay in same order)
|
||||
for i, expectedIdx := range tc.expected {
|
||||
require.Equal(t, columns[expectedIdx].Index, result[i].Index,
|
||||
fmt.Sprintf("column %d: expected index %d, got %d", i, columns[expectedIdx].Index, result[i].Index))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify optimization: if nothing stored, should return original slice
|
||||
if len(tc.stored) == 0 && tc.count > 0 {
|
||||
require.Equal(t, &columns[0], &result[0],
|
||||
"when no columns stored, should return original slice (same pointer)")
|
||||
}
|
||||
|
||||
// Verify optimization: if some stored, result should use in-place shifting
|
||||
if len(tc.stored) > 0 && len(tc.expected) > 0 && tc.count > 0 {
|
||||
require.Equal(t, cap(columns), cap(result),
|
||||
"result should be in-place shifted from original (same capacity)")
|
||||
}
|
||||
})
|
||||
}
|
||||
}

40 beacon-chain/das/bisect.go Normal file
@@ -0,0 +1,40 @@
package das

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// Bisector describes a type that takes a set of RODataColumns via the Bisect method
// and returns a BisectionIterator that returns batches of those columns to be
// verified together.
type Bisector interface {
	// Bisect initializes the BisectionIterator and returns the result.
	Bisect([]blocks.RODataColumn) (BisectionIterator, error)
}

// BisectionIterator describes an iterator that returns groups of columns to verify.
// It is up to the bisector implementation to decide how to chunk up the columns,
// whether by block, by peer, or any other strategy. For example, backfill implements
// a bisector that keeps track of the source of each sidecar by peer, and groups
// sidecars by peer in the Next method, enabling it to track which peers, out of all
// the peers contributing to a batch, gave us bad data.
// When a batch fails, the OnError method should be used so that the bisector can
// keep track of the failed groups of columns and e.g. apply that knowledge in peer scoring.
// The same column will be returned multiple times by Next; first as part of a larger batch,
// and again as part of a finer-grained batch if there was an error in the large batch.
// For example, first as part of a batch of all columns spanning peers, and then again
// as part of a batch of columns from a single peer if some column in the larger batch
// failed verification.
type BisectionIterator interface {
	// Next returns the next group of columns to verify.
	// When the iteration is complete, Next should return (nil, io.EOF).
	Next() ([]blocks.RODataColumn, error)
	// OnError should be called when verification of a group of columns obtained via Next() fails.
	OnError(error)
	// Error can be used at the end of the iteration to get a single error result. It will return
	// nil if OnError was never called, or an error of the implementer's choosing representing the set
	// of errors seen during iteration. For instance, when bisecting from columns spanning peers to columns
	// from a single peer, the broader error could be dropped, and then the more specific error
	// (for a single peer's response) returned after bisecting to it.
	Error() error
}
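A minimal sketch of a Bisector implementation (the names and the grouping strategy are assumptions; backfill's real implementation groups by peer, per the comment above). It chunks columns by block root and reports a single aggregate error:

package das

import (
	"fmt"
	"io"

	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// rootBisector is a hypothetical Bisector that chunks columns by block root.
type rootBisector struct{}

// rootIterator walks the per-root chunks and accumulates verification failures.
type rootIterator struct {
	chunks [][]blocks.RODataColumn
	pos    int
	errs   []error
}

func (rootBisector) Bisect(cols []blocks.RODataColumn) (BisectionIterator, error) {
	byRoot := make(map[[32]byte][]blocks.RODataColumn)
	for _, c := range cols {
		byRoot[c.BlockRoot()] = append(byRoot[c.BlockRoot()], c)
	}
	it := &rootIterator{}
	for _, chunk := range byRoot {
		it.chunks = append(it.chunks, chunk)
	}
	return it, nil
}

// Next returns the next per-root chunk, or io.EOF when iteration is complete.
func (it *rootIterator) Next() ([]blocks.RODataColumn, error) {
	if it.pos >= len(it.chunks) {
		return nil, io.EOF
	}
	chunk := it.chunks[it.pos]
	it.pos++
	return chunk, nil
}

// OnError records a failed chunk so Error can summarize the outcome.
func (it *rootIterator) OnError(err error) {
	it.errs = append(it.errs, err)
}

// Error reports nil if every chunk verified, or a single aggregate error otherwise.
func (it *rootIterator) Error() error {
	if len(it.errs) == 0 {
		return nil
	}
	return fmt.Errorf("%d column chunk(s) failed verification", len(it.errs))
}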
@@ -76,7 +76,7 @@ func (e *blobCacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
e.scs = make([]*blocks.ROBlob, maxBlobsPerBlock)
|
||||
}
|
||||
if e.scs[sc.Index] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment)
|
||||
return errors.Wrapf(errDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment)
|
||||
}
|
||||
e.scs[sc.Index] = sc
|
||||
return nil
|
||||
@@ -88,7 +88,7 @@ func (e *blobCacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
// commitments were found in the cache and the sidecar slice return value can be used
|
||||
// to perform a DA check against the cached sidecars.
|
||||
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
|
||||
func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([]blocks.ROBlob, error) {
|
||||
func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte) ([]blocks.ROBlob, error) {
|
||||
count := len(kc)
|
||||
if e.diskSummary.AllAvailable(count) {
|
||||
return nil, nil
|
||||
|
||||
@@ -34,7 +34,8 @@ type filterTestCaseSetupFunc func(t *testing.T) (*blobCacheEntry, [][]byte, []bl
|
||||
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
|
||||
return func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
|
||||
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
|
||||
shouldRetain := func(s primitives.Slot) bool { return true }
|
||||
commits, err := commitmentsToCheck(blk, shouldRetain)
|
||||
require.NoError(t, err)
|
||||
entry := &blobCacheEntry{}
|
||||
if len(onDisk) > 0 {
|
||||
@@ -113,7 +114,7 @@ func TestFilterDiskSummary(t *testing.T) {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
entry, commits, expected := c.setup(t)
|
||||
// first (root) argument doesn't matter, it is just for logs
|
||||
got, err := entry.filter([32]byte{}, commits, 100)
|
||||
got, err := entry.filter([32]byte{}, commits)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(got))
|
||||
})
|
||||
@@ -195,7 +196,7 @@ func TestFilter(t *testing.T) {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
entry, commits, expected := c.setup(t)
|
||||
// first (root) argument doesn't matter, it is just for logs
|
||||
got, err := entry.filter([32]byte{}, commits, 100)
|
||||
got, err := entry.filter([32]byte{}, commits)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -11,9 +9,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
|
||||
errDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
|
||||
errColumnIndexTooHigh = errors.New("column index too high")
|
||||
errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
|
||||
errCommitmentMismatch = errors.New("commitment of sidecar in cache did not match block commitment")
|
||||
errMissingSidecar = errors.New("no sidecar in cache for block commitment")
|
||||
)
|
||||
|
||||
@@ -25,107 +23,80 @@ func newDataColumnCache() *dataColumnCache {
|
||||
return &dataColumnCache{entries: make(map[cacheKey]*dataColumnCacheEntry)}
|
||||
}
|
||||
|
||||
// ensure returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *dataColumnCache) ensure(key cacheKey) *dataColumnCacheEntry {
|
||||
// entry returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *dataColumnCache) entry(key cacheKey) *dataColumnCacheEntry {
|
||||
entry, ok := c.entries[key]
|
||||
if !ok {
|
||||
entry = &dataColumnCacheEntry{}
|
||||
entry = newDataColumnCacheEntry(key.root)
|
||||
c.entries[key] = entry
|
||||
}
|
||||
|
||||
return entry
|
||||
}
|
||||
|
||||
func (c *dataColumnCache) cleanup(blks []blocks.ROBlock) {
|
||||
for _, block := range blks {
|
||||
key := cacheKey{slot: block.Block().Slot(), root: block.Root()}
|
||||
c.delete(key)
|
||||
}
|
||||
}
|
||||
|
||||
// delete removes the cache entry from the cache.
|
||||
func (c *dataColumnCache) delete(key cacheKey) {
|
||||
delete(c.entries, key)
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type dataColumnCacheEntry struct {
|
||||
scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.DataColumnStorageSummary
|
||||
func (c *dataColumnCache) stash(sc blocks.RODataColumn) error {
|
||||
key := cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
entry := c.entry(key)
|
||||
return entry.stash(sc)
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
|
||||
e.diskSummary = sum
|
||||
func newDataColumnCacheEntry(root [32]byte) *dataColumnCacheEntry {
|
||||
return &dataColumnCacheEntry{scs: make(map[uint64]blocks.RODataColumn), root: &root}
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry is the set of RODataColumns for a given block.
|
||||
type dataColumnCacheEntry struct {
|
||||
root *[32]byte
|
||||
scs map[uint64]blocks.RODataColumn
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of DataColumnSidecars.
|
||||
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
|
||||
// stash will return an error if the given data colunn is already in the cache, or if the Index is out of bounds.
|
||||
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
|
||||
// stash will return an error if the given data column Index is out of bounds.
|
||||
// It will overwrite any existing entry for the same index.
|
||||
func (e *dataColumnCacheEntry) stash(sc blocks.RODataColumn) error {
|
||||
if sc.Index >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
|
||||
}
|
||||
|
||||
if e.scs[sc.Index] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
|
||||
}
|
||||
|
||||
e.scs[sc.Index] = sc
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
|
||||
if e.diskSummary.AllAvailable(nonEmptyIndices) {
|
||||
return nil, nil
|
||||
// append appends the requested root and indices from the cache to the given sidecars slice and returns the result.
|
||||
// If any of the given indices are missing, an error will be returned and the sidecars slice will be unchanged.
|
||||
func (e *dataColumnCacheEntry) append(sidecars []blocks.RODataColumn, indices peerdas.ColumnIndices) ([]blocks.RODataColumn, error) {
|
||||
needed := indices.ToMap()
|
||||
for col := range needed {
|
||||
_, ok := e.scs[col]
|
||||
if !ok {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", e.root, col)
|
||||
}
|
||||
}
|
||||
|
||||
commitmentsCount := commitmentsArray.count()
|
||||
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
|
||||
|
||||
for i := range nonEmptyIndices {
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
|
||||
if !sliceBytesEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, *e.scs[i])
|
||||
// Loop twice so we can avoid touching the slice if any of the blobs are missing.
|
||||
for col := range needed {
|
||||
sidecars = append(sidecars, e.scs[col])
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// safeCommitmentsArray is a fixed size array of commitments.
|
||||
// This is helpful for avoiding gratuitous bounds checks.
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
// count returns the number of commitments in the array.
|
||||
func (s *safeCommitmentsArray) count() int {
|
||||
count := 0
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
count++
|
||||
// IndicesNotStored filters the list of indices to only include those that are not found in the storage summary.
|
||||
func IndicesNotStored(sum filesystem.DataColumnStorageSummary, indices peerdas.ColumnIndices) peerdas.ColumnIndices {
|
||||
indices = indices.Copy()
|
||||
for col := range indices {
|
||||
if sum.HasIndex(col) {
|
||||
indices.Unset(col)
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// nonEmptyIndices returns a map of indices that are non-nil in the array.
|
||||
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
|
||||
columns := make(map[uint64]bool)
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
columns[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func sliceBytesEqual(a, b [][]byte) bool {
|
||||
return slices.EqualFunc(a, b, bytes.Equal)
|
||||
return indices
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -13,124 +15,105 @@ import (
|
||||
func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
c := newDataColumnCache()
|
||||
key := cacheKey{}
|
||||
entry := c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
entry := c.entry(key)
|
||||
require.Equal(t, 0, len(entry.scs))
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
|
||||
entry.setDiskSummary(diskSummary)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)
|
||||
nonDupe := c.entry(key)
|
||||
require.Equal(t, entry, nonDupe) // same pointer
|
||||
expect, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
require.NoError(t, entry.stash(expect[0]))
|
||||
require.Equal(t, 1, len(entry.scs))
|
||||
cols, err := nonDupe.append([]blocks.RODataColumn{}, peerdas.NewColumnIndicesFromSlice([]uint64{expect[0].Index}))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expect[0], cols[0])
|
||||
|
||||
c.delete(key)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
entry = c.entry(key)
|
||||
require.Equal(t, 0, len(entry.scs))
|
||||
require.NotEqual(t, entry, nonDupe) // different pointer
|
||||
}
|
||||
|
||||
func TestStash(t *testing.T) {
|
||||
t.Run("Index too high", func(t *testing.T) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
columns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
err := entry.stash(columns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal and already existing", func(t *testing.T) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
entry := newDataColumnCacheEntry(roDataColumns[0].BlockRoot())
|
||||
err := entry.stash(roDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
|
||||
err = entry.stash(&roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
require.NoError(t, entry.stash(roDataColumns[0]))
|
||||
// stash simply replaces duplicate values now
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilterDataColumns(t *testing.T) {
|
||||
func TestAppendDataColumns(t *testing.T) {
|
||||
t.Run("All available", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
notStored := IndicesNotStored(sum, peerdas.NewColumnIndicesFromSlice([]uint64{1, 3}))
|
||||
actual, err := newDataColumnCacheEntry([32]byte{}).append([]blocks.RODataColumn{}, notStored)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("Some scs missing", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Commitments not equal", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[1] = &roDataColumns[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter(roDataColumns[0].BlockRoot(), &commitmentsArray)
|
||||
notStored := IndicesNotStored(sum, peerdas.NewColumnIndicesFromSlice([]uint64{1}))
|
||||
actual, err := newDataColumnCacheEntry([32]byte{}).append([]blocks.RODataColumn{}, notStored)
|
||||
require.Equal(t, 0, len(actual))
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
indices := peerdas.NewColumnIndicesFromSlice([]uint64{1, 3})
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 3, KzgCommitments: [][]byte{[]byte{3}}}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[3] = &expected[0]
|
||||
scs := map[uint64]blocks.RODataColumn{
|
||||
3: expected[0],
|
||||
}
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
entry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter(expected[0].BlockRoot(), &commitmentsArray)
|
||||
actual, err := entry.append([]blocks.RODataColumn{}, IndicesNotStored(sum, indices))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
require.Equal(t, 2, s.count())
|
||||
}
|
||||
t.Run("Append does not mutate the input", func(t *testing.T) {
|
||||
indices := peerdas.NewColumnIndicesFromSlice([]uint64{1, 2})
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Index: 0, KzgCommitments: [][]byte{[]byte{1}}},
|
||||
{Index: 1, KzgCommitments: [][]byte{[]byte{2}}},
|
||||
{Index: 2, KzgCommitments: [][]byte{[]byte{3}}},
|
||||
})
|
||||
|
||||
func TestNonEmptyIndices(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
|
||||
actual := s.nonEmptyIndices()
|
||||
require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
|
||||
}
|
||||
scs := map[uint64]blocks.RODataColumn{
|
||||
1: expected[1],
|
||||
2: expected[2],
|
||||
}
|
||||
entry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
func TestSliceBytesEqual(t *testing.T) {
|
||||
t.Run("Different lengths", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
require.Equal(t, false, sliceBytesEqual(a, b))
|
||||
})
|
||||
|
||||
t.Run("Same length but different content", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 7}}
|
||||
require.Equal(t, false, sliceBytesEqual(a, b))
|
||||
})
|
||||
|
||||
t.Run("Equal slices", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
require.Equal(t, true, sliceBytesEqual(a, b))
|
||||
original := []blocks.RODataColumn{expected[0]}
|
||||
actual, err := entry.append(original, indices)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
slices.SortFunc(actual, func(i, j blocks.RODataColumn) int {
|
||||
return int(i.Index) - int(j.Index)
|
||||
})
|
||||
for i := range expected {
|
||||
require.Equal(t, expected[i].Index, actual[i].Index)
|
||||
}
|
||||
require.Equal(t, 1, len(original))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously
|
||||
// verified and saved sidecars.
|
||||
// Persist guarantees that the sidecar will be available to perform a DA check
|
||||
// for the life of the beacon node process.
|
||||
// IsDataAvailable guarantees that all blobs committed to in the block have been
|
||||
// durably persisted before returning a non-error value.
|
||||
type AvailabilityStore interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
// AvailabilityChecker is the minimum interface needed to check if data is available for a block.
|
||||
// By convention there is a concept of an AvailabilityStore that implements a method to persist
|
||||
// blobs or data columns to prepare for Availability checking, but since those methods are different
|
||||
// for different forms of blob data, they are not included in the interface.
|
||||
type AvailabilityChecker interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error
|
||||
}
|
||||
|
||||
// RetentionChecker is a callback that determines whether blobs at the given slot are within the retention period.
|
||||
type RetentionChecker func(primitives.Slot) bool
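As a rough illustration of how the narrowed interface might be consumed (a hypothetical sketch, not code from this change), a caller that only needs the DA check can depend on AvailabilityChecker plus a RetentionChecker:

// checkBatchAvailability is an illustrative function written against the minimal
// AvailabilityChecker interface rather than a concrete availability store.
func checkBatchAvailability(ctx context.Context, ac AvailabilityChecker, within RetentionChecker, current primitives.Slot, blks []blocks.ROBlock) error {
	inWindow := make([]blocks.ROBlock, 0, len(blks))
	for _, b := range blks {
		// Skip blocks whose slot is outside the retention window; there is nothing to check for them.
		if within(b.Block().Slot()) {
			inWindow = append(inWindow, b)
		}
	}
	if len(inWindow) == 0 {
		return nil
	}
	// The variadic signature lets a whole batch be checked in a single call.
	return ac.IsDataAvailable(ctx, current, inWindow...)
}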
|
||||
|
||||
beacon-chain/das/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
package das
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "das")
|
||||
@@ -9,16 +9,20 @@ import (
|
||||
|
||||
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
|
||||
type MockAvailabilityStore struct {
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error
|
||||
ErrIsDataAvailable error
|
||||
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
var _ AvailabilityChecker = &MockAvailabilityStore{}
|
||||
|
||||
// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error {
|
||||
if m.ErrIsDataAvailable != nil {
|
||||
return m.ErrIsDataAvailable
|
||||
}
|
||||
if m.VerifyAvailabilityCallback != nil {
|
||||
return m.VerifyAvailabilityCallback(ctx, current, b)
|
||||
return m.VerifyAvailabilityCallback(ctx, current, b...)
|
||||
}
|
||||
return nil
|
||||
}
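A hedged sketch of how the mock might be exercised in a test that lives alongside it (the test name and scenario are hypothetical):

// TestConsumerSurfacesDAError is illustrative only and not part of this change.
func TestConsumerSurfacesDAError(t *testing.T) {
	sentinel := errors.New("data not available")
	mock := &MockAvailabilityStore{ErrIsDataAvailable: sentinel}
	// Every IsDataAvailable call now fails with the configured error, letting the
	// test assert that callers propagate it.
	err := mock.IsDataAvailable(context.Background(), 0 /* current slot */)
	require.ErrorIs(t, err, sentinel)
}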
|
||||
|
||||
beacon-chain/das/needs.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NeedSpan represents the need for a resource over a span of slots.
|
||||
type NeedSpan struct {
|
||||
Begin primitives.Slot
|
||||
End primitives.Slot
|
||||
}
|
||||
|
||||
// At returns whether blocks/blobs/columns are needed at the given slot.
|
||||
func (n NeedSpan) At(slot primitives.Slot) bool {
|
||||
return slot >= n.Begin && slot < n.End
|
||||
}
|
||||
|
||||
// CurrentNeeds fields can be used to check whether the given resource type is needed
|
||||
// at a given slot. The values are based on the current slot, so this value shouldn't
|
||||
// be retained / reused across slots.
|
||||
type CurrentNeeds struct {
|
||||
Block NeedSpan
|
||||
Blob NeedSpan
|
||||
Col NeedSpan
|
||||
}
|
||||
|
||||
// SyncNeeds holds configuration and state for determining what data is needed
|
||||
// at any given slot during backfill based on the current slot.
|
||||
type SyncNeeds struct {
|
||||
current func() primitives.Slot
|
||||
deneb primitives.Slot
|
||||
fulu primitives.Slot
|
||||
|
||||
oldestSlotFlagPtr *primitives.Slot
|
||||
validOldestSlotPtr *primitives.Slot
|
||||
blockRetention primitives.Epoch
|
||||
|
||||
blobRetentionFlag primitives.Epoch
|
||||
blobRetention primitives.Epoch
|
||||
colRetention primitives.Epoch
|
||||
}
|
||||
|
||||
type CurrentSlotter func() primitives.Slot
|
||||
|
||||
func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, blobRetentionFlag primitives.Epoch) (SyncNeeds, error) {
|
||||
deneb, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
if err != nil {
|
||||
return SyncNeeds{}, errors.Wrap(err, "deneb fork slot")
|
||||
}
|
||||
fuluBoundary := min(params.BeaconConfig().FuluForkEpoch, slots.MaxSafeEpoch())
|
||||
fulu, err := slots.EpochStart(fuluBoundary)
|
||||
if err != nil {
|
||||
return SyncNeeds{}, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
sn := SyncNeeds{
|
||||
current: func() primitives.Slot { return current() },
|
||||
deneb: deneb,
|
||||
fulu: fulu,
|
||||
blobRetentionFlag: blobRetentionFlag,
|
||||
}
|
||||
// We apply the --blob-retention-epochs flag to both blob and column retention.
|
||||
sn.blobRetention = max(sn.blobRetentionFlag, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
sn.colRetention = max(sn.blobRetentionFlag, params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
|
||||
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
|
||||
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
|
||||
if oldestSlotFlagPtr != nil {
|
||||
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
|
||||
if oldestEpoch < sn.blockRetention {
|
||||
sn.validOldestSlotPtr = oldestSlotFlagPtr
|
||||
} else {
|
||||
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
|
||||
WithField("specMinSlot", syncEpochOffset(current(), sn.blockRetention)).
|
||||
Warn("Ignoring user-specified slot > MIN_EPOCHS_FOR_BLOCK_REQUESTS.")
|
||||
}
|
||||
}
|
||||
|
||||
return sn, nil
|
||||
}
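As a usage sketch (the wiring function is hypothetical; the SyncNeeds API is the one added above), a caller would construct SyncNeeds once and hand out the retention checkers:

// wireRetention shows how the blob and data column components might receive
// their retention checkers from a single SyncNeeds instance.
func wireRetention(clock func() primitives.Slot, oldestSlotFlag *primitives.Slot, blobRetentionFlag primitives.Epoch) (RetentionChecker, RetentionChecker, error) {
	needs, err := NewSyncNeeds(clock, oldestSlotFlag, blobRetentionFlag)
	if err != nil {
		return nil, nil, err
	}
	// Each checker re-evaluates Currently() on every call, so the retention
	// windows keep tracking the advancing current slot.
	return needs.BlobRetentionChecker(), needs.DataColumnRetentionChecker(), nil
}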
|
||||
|
||||
// Currently is the main callback given to the different parts of backfill to determine
|
||||
// what resources are needed at a given slot. It assumes the current instance of SyncNeeds
|
||||
// is the result of calling NewSyncNeeds.
|
||||
func (n SyncNeeds) Currently() CurrentNeeds {
|
||||
current := n.current()
|
||||
c := CurrentNeeds{
|
||||
Block: n.blockSpan(current),
|
||||
Blob: NeedSpan{Begin: syncEpochOffset(current, n.blobRetention), End: n.fulu},
|
||||
Col: NeedSpan{Begin: syncEpochOffset(current, n.colRetention), End: current},
|
||||
}
|
||||
// Adjust the minimums forward to the slots where the sidecar types were introduced
|
||||
c.Blob.Begin = max(c.Blob.Begin, n.deneb)
|
||||
c.Col.Begin = max(c.Col.Begin, n.fulu)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (n SyncNeeds) blockSpan(current primitives.Slot) NeedSpan {
|
||||
if n.validOldestSlotPtr != nil { // assumes validation done in NewSyncNeeds()
|
||||
return NeedSpan{Begin: *n.validOldestSlotPtr, End: current}
|
||||
}
|
||||
return NeedSpan{Begin: syncEpochOffset(current, n.blockRetention), End: current}
|
||||
}
|
||||
|
||||
func (n SyncNeeds) BlobRetentionChecker() RetentionChecker {
|
||||
return func(slot primitives.Slot) bool {
|
||||
current := n.Currently()
|
||||
return current.Blob.At(slot)
|
||||
}
|
||||
}
|
||||
|
||||
func (n SyncNeeds) DataColumnRetentionChecker() RetentionChecker {
|
||||
return func(slot primitives.Slot) bool {
|
||||
current := n.Currently()
|
||||
return current.Col.At(slot)
|
||||
}
|
||||
}
|
||||
|
||||
// syncEpochOffset subtracts a number of epochs as slots from the current slot, with underflow checks.
|
||||
// It returns slot 1 if the result would be 0 or underflow. It doesn't return slot 0 because the
|
||||
// genesis block needs to be specially synced (it doesn't have a valid signature).
|
||||
func syncEpochOffset(current primitives.Slot, subtract primitives.Epoch) primitives.Slot {
|
||||
minEpoch := min(subtract, slots.MaxSafeEpoch())
|
||||
// compute slot offset - offset is a number of slots to go back from current (not an absolute slot).
|
||||
offset := slots.UnsafeEpochStart(minEpoch)
|
||||
// Underflow protection: slot 0 is the genesis block, so its signature is invalid.
|
||||
// To avoid rejecting a batch, we restrict the minimum backfill slot to 1.
|
||||
if offset >= current {
|
||||
return 1
|
||||
}
|
||||
return current - offset
|
||||
}
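For intuition, with 32 slots per epoch the helper behaves roughly as follows (illustrative values only):

// syncEpochOffset(10_000, 5)  -> 10_000 - 5*32 = 9_840
// syncEpochOffset(32, 1)      -> offset (32) >= current (32), so it returns 1
// syncEpochOffset(50, 1_000)  -> offset far exceeds current, so it returns 1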
|
||||
beacon-chain/das/needs_test.go (new file, 675 lines)
@@ -0,0 +1,675 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// TestNeedSpanAt tests the NeedSpan.At() method for range checking.
|
||||
func TestNeedSpanAt(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
span NeedSpan
|
||||
slots []primitives.Slot
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "within bounds",
|
||||
span: NeedSpan{Begin: 100, End: 200},
|
||||
slots: []primitives.Slot{101, 150, 199},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "before begin / at end boundary (exclusive)",
|
||||
span: NeedSpan{Begin: 100, End: 200},
|
||||
slots: []primitives.Slot{99, 200, 201},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "empty span (begin == end)",
|
||||
span: NeedSpan{Begin: 100, End: 100},
|
||||
slots: []primitives.Slot{100},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "slot 0 with span starting at 0",
|
||||
span: NeedSpan{Begin: 0, End: 100},
|
||||
slots: []primitives.Slot{0},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
for _, sl := range tc.slots {
|
||||
t.Run(fmt.Sprintf("%s at slot %d", tc.name, sl), func(t *testing.T) {
|
||||
result := tc.span.At(sl)
|
||||
require.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncEpochOffset tests the syncEpochOffset helper function.
|
||||
func TestSyncEpochOffset(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
current primitives.Slot
|
||||
subtract primitives.Epoch
|
||||
expected primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "typical offset - 5 epochs back",
|
||||
current: primitives.Slot(10000),
|
||||
subtract: 5,
|
||||
expected: primitives.Slot(10000 - 5*slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "zero subtract returns current",
|
||||
current: primitives.Slot(5000),
|
||||
subtract: 0,
|
||||
expected: primitives.Slot(5000),
|
||||
},
|
||||
{
|
||||
name: "subtract 1 epoch from mid-range slot",
|
||||
current: primitives.Slot(1000),
|
||||
subtract: 1,
|
||||
expected: primitives.Slot(1000 - slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "offset equals current - underflow protection",
|
||||
current: primitives.Slot(slotsPerEpoch),
|
||||
subtract: 1,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "offset exceeds current - underflow protection",
|
||||
current: primitives.Slot(50),
|
||||
subtract: 1000,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "current very close to 0",
|
||||
current: primitives.Slot(10),
|
||||
subtract: 1,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "subtract MaxSafeEpoch",
|
||||
current: primitives.Slot(1000000),
|
||||
subtract: slots.MaxSafeEpoch(),
|
||||
expected: 1, // underflow protection
|
||||
},
|
||||
{
|
||||
name: "result exactly at slot 1",
|
||||
current: primitives.Slot(1 + slotsPerEpoch),
|
||||
subtract: 1,
|
||||
expected: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := syncEpochOffset(tc.current, tc.subtract)
|
||||
require.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncNeedsInitialize tests NewSyncNeeds initialization.
|
||||
func TestSyncNeedsInitialize(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
|
||||
currentSlot := primitives.Slot(10000)
|
||||
currentFunc := func() primitives.Slot { return currentSlot }
|
||||
|
||||
cases := []struct {
|
||||
invalidOldestFlag bool
|
||||
expectValidOldest bool
|
||||
oldestSlotFlagPtr *primitives.Slot
|
||||
blobRetentionFlag primitives.Epoch
|
||||
expectedBlob primitives.Epoch
|
||||
expectedCol primitives.Epoch
|
||||
name string
|
||||
input SyncNeeds
|
||||
}{
|
||||
{
|
||||
name: "basic initialization with no flags",
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
blobRetentionFlag: 0,
|
||||
},
|
||||
{
|
||||
name: "blob retention flag less than spec minimum",
|
||||
blobRetentionFlag: minBlobEpochs - 1,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "blob retention flag greater than spec minimum",
|
||||
blobRetentionFlag: minBlobEpochs + 10,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs + 10,
|
||||
expectedCol: minBlobEpochs + 10,
|
||||
},
|
||||
{
|
||||
name: "oldestSlotFlagPtr is nil",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: nil,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(10)
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
// Make it way past the spec minimum
|
||||
slot := currentSlot - primitives.Slot(params.BeaconConfig().MinEpochsForBlockRequests-1)*slotsPerEpoch
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
invalidOldestFlag: true,
|
||||
},
|
||||
{
|
||||
name: "oldestSlotFlagPtr at boundary (exactly at spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := currentSlot - primitives.Slot(params.BeaconConfig().MinEpochsForBlockRequests)*slotsPerEpoch
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
invalidOldestFlag: true,
|
||||
},
|
||||
{
|
||||
name: "both blob retention flag and oldest slot set",
|
||||
blobRetentionFlag: minBlobEpochs + 5,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(100)
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs + 5,
|
||||
expectedCol: minBlobEpochs + 5,
|
||||
},
|
||||
{
|
||||
name: "zero blob retention uses spec minimum",
|
||||
blobRetentionFlag: 0,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
},
|
||||
{
|
||||
name: "large blob retention value",
|
||||
blobRetentionFlag: 5000,
|
||||
expectValidOldest: false,
|
||||
expectedBlob: 5000,
|
||||
expectedCol: 5000,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that current, deneb, fulu are set correctly
|
||||
require.Equal(t, currentSlot, result.current())
|
||||
|
||||
// Check retention calculations
|
||||
require.Equal(t, tc.expectedBlob, result.blobRetention)
|
||||
require.Equal(t, tc.expectedCol, result.colRetention)
|
||||
|
||||
if tc.invalidOldestFlag {
|
||||
require.IsNil(t, result.validOldestSlotPtr)
|
||||
} else {
|
||||
require.Equal(t, tc.oldestSlotFlagPtr, result.validOldestSlotPtr)
|
||||
}
|
||||
|
||||
// Check blockRetention is always spec minimum
|
||||
require.Equal(t, primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests), result.blockRetention)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncNeedsBlockSpan tests the SyncNeeds.blockSpan() method.
|
||||
func TestSyncNeedsBlockSpan(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
minBlockEpochs := params.BeaconConfig().MinEpochsForBlockRequests
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
validOldest *primitives.Slot
|
||||
blockRetention primitives.Epoch
|
||||
current primitives.Slot
|
||||
expectedBegin primitives.Slot
|
||||
expectedEnd primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "with validOldestSlotPtr set",
|
||||
validOldest: func() *primitives.Slot { s := primitives.Slot(500); return &s }(),
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 10000,
|
||||
expectedBegin: 500,
|
||||
expectedEnd: 10000,
|
||||
},
|
||||
{
|
||||
name: "without validOldestSlotPtr (nil)",
|
||||
validOldest: nil,
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 10000,
|
||||
expectedBegin: syncEpochOffset(10000, primitives.Epoch(minBlockEpochs)),
|
||||
expectedEnd: 10000,
|
||||
},
|
||||
{
|
||||
name: "very low current slot",
|
||||
validOldest: nil,
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 100,
|
||||
expectedBegin: 1, // underflow protection
|
||||
expectedEnd: 100,
|
||||
},
|
||||
{
|
||||
name: "very high current slot",
|
||||
validOldest: nil,
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 1000000,
|
||||
expectedBegin: syncEpochOffset(1000000, primitives.Epoch(minBlockEpochs)),
|
||||
expectedEnd: 1000000,
|
||||
},
|
||||
{
|
||||
name: "validOldestSlotPtr at boundary value",
|
||||
validOldest: func() *primitives.Slot { s := primitives.Slot(1); return &s }(),
|
||||
blockRetention: primitives.Epoch(minBlockEpochs),
|
||||
current: 5000,
|
||||
expectedBegin: 1,
|
||||
expectedEnd: 5000,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sn := SyncNeeds{
|
||||
validOldestSlotPtr: tc.validOldest,
|
||||
blockRetention: tc.blockRetention,
|
||||
}
|
||||
result := sn.blockSpan(tc.current)
|
||||
require.Equal(t, tc.expectedBegin, result.Begin)
|
||||
require.Equal(t, tc.expectedEnd, result.End)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSyncNeedsCurrently tests the SyncNeeds.Currently() method.
|
||||
func TestSyncNeedsCurrently(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
denebSlot := primitives.Slot(1000)
|
||||
fuluSlot := primitives.Slot(2000)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
current primitives.Slot
|
||||
blobRetention primitives.Epoch
|
||||
colRetention primitives.Epoch
|
||||
blockRetention primitives.Epoch
|
||||
validOldest *primitives.Slot
|
||||
// Expected block span
|
||||
expectBlockBegin primitives.Slot
|
||||
expectBlockEnd primitives.Slot
|
||||
// Expected blob span
|
||||
expectBlobBegin primitives.Slot
|
||||
expectBlobEnd primitives.Slot
|
||||
// Expected column span
|
||||
expectColBegin primitives.Slot
|
||||
expectColEnd primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "pre-Deneb - only blocks needed",
|
||||
current: 500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(500, 5),
|
||||
expectBlockEnd: 500,
|
||||
expectBlobBegin: denebSlot, // adjusted to deneb
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot, // adjusted to fulu
|
||||
expectColEnd: 500,
|
||||
},
|
||||
{
|
||||
name: "between Deneb and Fulu - blocks and blobs needed",
|
||||
current: 1500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(1500, 5),
|
||||
expectBlockEnd: 1500,
|
||||
expectBlobBegin: max(syncEpochOffset(1500, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot, // adjusted to fulu
|
||||
expectColEnd: 1500,
|
||||
},
|
||||
{
|
||||
name: "post-Fulu - all resources needed",
|
||||
current: 3000,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(3000, 5),
|
||||
expectBlockEnd: 3000,
|
||||
expectBlobBegin: max(syncEpochOffset(3000, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(3000, 10), fuluSlot),
|
||||
expectColEnd: 3000,
|
||||
},
|
||||
{
|
||||
name: "exactly at Deneb boundary",
|
||||
current: denebSlot,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(denebSlot, 5),
|
||||
expectBlockEnd: denebSlot,
|
||||
expectBlobBegin: denebSlot,
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: denebSlot,
|
||||
},
|
||||
{
|
||||
name: "exactly at Fulu boundary",
|
||||
current: fuluSlot,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(fuluSlot, 5),
|
||||
expectBlockEnd: fuluSlot,
|
||||
expectBlobBegin: max(syncEpochOffset(fuluSlot, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: fuluSlot,
|
||||
},
|
||||
{
|
||||
name: "small retention periods",
|
||||
current: 5000,
|
||||
blobRetention: 1,
|
||||
colRetention: 2,
|
||||
blockRetention: 1,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(5000, 1),
|
||||
expectBlockEnd: 5000,
|
||||
expectBlobBegin: max(syncEpochOffset(5000, 1), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(5000, 2), fuluSlot),
|
||||
expectColEnd: 5000,
|
||||
},
|
||||
{
|
||||
name: "large retention periods",
|
||||
current: 10000,
|
||||
blobRetention: 100,
|
||||
colRetention: 100,
|
||||
blockRetention: 50,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(10000, 50),
|
||||
expectBlockEnd: 10000,
|
||||
expectBlobBegin: max(syncEpochOffset(10000, 100), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(10000, 100), fuluSlot),
|
||||
expectColEnd: 10000,
|
||||
},
|
||||
{
|
||||
name: "with validOldestSlotPtr for blocks",
|
||||
current: 8000,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: func() *primitives.Slot { s := primitives.Slot(100); return &s }(),
|
||||
expectBlockBegin: 100,
|
||||
expectBlockEnd: 8000,
|
||||
expectBlobBegin: max(syncEpochOffset(8000, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(8000, 10), fuluSlot),
|
||||
expectColEnd: 8000,
|
||||
},
|
||||
{
|
||||
name: "retention approaching current slot",
|
||||
current: primitives.Slot(2000 + 5*slotsPerEpoch),
|
||||
blobRetention: 5,
|
||||
colRetention: 5,
|
||||
blockRetention: 3,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(primitives.Slot(2000+5*slotsPerEpoch), 3),
|
||||
expectBlockEnd: primitives.Slot(2000 + 5*slotsPerEpoch),
|
||||
expectBlobBegin: max(syncEpochOffset(primitives.Slot(2000+5*slotsPerEpoch), 5), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: max(syncEpochOffset(primitives.Slot(2000+5*slotsPerEpoch), 5), fuluSlot),
|
||||
expectColEnd: primitives.Slot(2000 + 5*slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "current just after Deneb",
|
||||
current: denebSlot + 10,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(denebSlot+10, 5),
|
||||
expectBlockEnd: denebSlot + 10,
|
||||
expectBlobBegin: denebSlot,
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: denebSlot + 10,
|
||||
},
|
||||
{
|
||||
name: "current just after Fulu",
|
||||
current: fuluSlot + 10,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(fuluSlot+10, 5),
|
||||
expectBlockEnd: fuluSlot + 10,
|
||||
expectBlobBegin: max(syncEpochOffset(fuluSlot+10, 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: fuluSlot + 10,
|
||||
},
|
||||
{
|
||||
name: "blob retention would start before Deneb",
|
||||
current: denebSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
blobRetention: 100, // very large retention
|
||||
colRetention: 10,
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(denebSlot+primitives.Slot(5*slotsPerEpoch), 5),
|
||||
expectBlockEnd: denebSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
expectBlobBegin: denebSlot, // clamped to deneb
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot,
|
||||
expectColEnd: denebSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
},
|
||||
{
|
||||
name: "column retention would start before Fulu",
|
||||
current: fuluSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
blobRetention: 10,
|
||||
colRetention: 100, // very large retention
|
||||
blockRetention: 5,
|
||||
validOldest: nil,
|
||||
expectBlockBegin: syncEpochOffset(fuluSlot+primitives.Slot(5*slotsPerEpoch), 5),
|
||||
expectBlockEnd: fuluSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
expectBlobBegin: max(syncEpochOffset(fuluSlot+primitives.Slot(5*slotsPerEpoch), 10), denebSlot),
|
||||
expectBlobEnd: fuluSlot,
|
||||
expectColBegin: fuluSlot, // clamped to fulu
|
||||
expectColEnd: fuluSlot + primitives.Slot(5*slotsPerEpoch),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sn := SyncNeeds{
|
||||
current: func() primitives.Slot { return tc.current },
|
||||
deneb: denebSlot,
|
||||
fulu: fuluSlot,
|
||||
validOldestSlotPtr: tc.validOldest,
|
||||
blockRetention: tc.blockRetention,
|
||||
blobRetention: tc.blobRetention,
|
||||
colRetention: tc.colRetention,
|
||||
}
|
||||
|
||||
result := sn.Currently()
|
||||
|
||||
// Verify block span
|
||||
require.Equal(t, tc.expectBlockBegin, result.Block.Begin,
|
||||
"block.begin mismatch")
|
||||
require.Equal(t, tc.expectBlockEnd, result.Block.End,
|
||||
"block.end mismatch")
|
||||
|
||||
// Verify blob span
|
||||
require.Equal(t, tc.expectBlobBegin, result.Blob.Begin,
|
||||
"blob.begin mismatch")
|
||||
require.Equal(t, tc.expectBlobEnd, result.Blob.End,
|
||||
"blob.end mismatch")
|
||||
|
||||
// Verify column span
|
||||
require.Equal(t, tc.expectColBegin, result.Col.Begin,
|
||||
"col.begin mismatch")
|
||||
require.Equal(t, tc.expectColEnd, result.Col.End,
|
||||
"col.end mismatch")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCurrentNeedsIntegration verifies the complete CurrentNeeds workflow.
|
||||
func TestCurrentNeedsIntegration(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
denebSlot := primitives.Slot(1000)
|
||||
fuluSlot := primitives.Slot(2000)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
current primitives.Slot
|
||||
blobRetention primitives.Epoch
|
||||
colRetention primitives.Epoch
|
||||
testSlots []primitives.Slot
|
||||
expectBlockAt []bool
|
||||
expectBlobAt []bool
|
||||
expectColAt []bool
|
||||
}{
|
||||
{
|
||||
name: "pre-Deneb slot - only blocks",
|
||||
current: 500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
testSlots: []primitives.Slot{100, 250, 499, 500, 1000, 2000},
|
||||
expectBlockAt: []bool{true, true, true, false, false, false},
|
||||
expectBlobAt: []bool{false, false, false, false, true, false},
|
||||
expectColAt: []bool{false, false, false, false, false, false},
|
||||
},
|
||||
{
|
||||
name: "between Deneb and Fulu - blocks and blobs",
|
||||
current: 1500,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
testSlots: []primitives.Slot{500, 1000, 1200, 1499, 1500, 2000},
|
||||
expectBlockAt: []bool{true, true, true, true, false, false},
|
||||
expectBlobAt: []bool{false, false, true, true, true, false},
|
||||
expectColAt: []bool{false, false, false, false, false, false},
|
||||
},
|
||||
{
|
||||
name: "post-Fulu - all resources",
|
||||
current: 3000,
|
||||
blobRetention: 10,
|
||||
colRetention: 10,
|
||||
testSlots: []primitives.Slot{1000, 1500, 2000, 2500, 2999, 3000},
|
||||
expectBlockAt: []bool{true, true, true, true, true, false},
|
||||
expectBlobAt: []bool{false, false, false, false, false, false},
|
||||
expectColAt: []bool{false, false, false, false, true, false},
|
||||
},
|
||||
{
|
||||
name: "at Deneb boundary",
|
||||
current: denebSlot,
|
||||
blobRetention: 5,
|
||||
colRetention: 5,
|
||||
testSlots: []primitives.Slot{500, 999, 1000, 1500, 2000},
|
||||
expectBlockAt: []bool{true, true, false, false, false},
|
||||
expectBlobAt: []bool{false, false, true, true, false},
|
||||
expectColAt: []bool{false, false, false, false, false},
|
||||
},
|
||||
{
|
||||
name: "at Fulu boundary",
|
||||
current: fuluSlot,
|
||||
blobRetention: 5,
|
||||
colRetention: 5,
|
||||
testSlots: []primitives.Slot{1000, 1500, 1999, 2000, 2001},
|
||||
expectBlockAt: []bool{true, true, true, false, false},
|
||||
expectBlobAt: []bool{false, false, true, false, false},
|
||||
expectColAt: []bool{false, false, false, false, false},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sn := SyncNeeds{
|
||||
current: func() primitives.Slot { return tc.current },
|
||||
deneb: denebSlot,
|
||||
fulu: fuluSlot,
|
||||
blockRetention: 100,
|
||||
blobRetention: tc.blobRetention,
|
||||
colRetention: tc.colRetention,
|
||||
}
|
||||
|
||||
cn := sn.Currently()
|
||||
|
||||
// Verify block.end == current
|
||||
require.Equal(t, tc.current, cn.Block.End, "block.end should equal current")
|
||||
|
||||
// Verify blob.end == fulu
|
||||
require.Equal(t, fuluSlot, cn.Blob.End, "blob.end should equal fulu")
|
||||
|
||||
// Verify col.end == current
|
||||
require.Equal(t, tc.current, cn.Col.End, "col.end should equal current")
|
||||
|
||||
// Test each slot
|
||||
for i, slot := range tc.testSlots {
|
||||
require.Equal(t, tc.expectBlockAt[i], cn.Block.At(slot),
|
||||
"block.at(%d) mismatch at index %d", slot, i)
|
||||
require.Equal(t, tc.expectBlobAt[i], cn.Blob.At(slot),
|
||||
"blob.at(%d) mismatch at index %d", slot, i)
|
||||
require.Equal(t, tc.expectColAt[i], cn.Col.At(slot),
|
||||
"col.at(%d) mismatch at index %d", slot, i)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
|
||||
// Check the number of columns is the one expected.
|
||||
// While implementing this, we expect the number of columns won't change.
|
||||
// If it does, we will need to create a new version of the data column sidecar file.
|
||||
if params.BeaconConfig().NumberOfColumns != mandatoryNumberOfColumns {
|
||||
if fieldparams.NumberOfColumns != mandatoryNumberOfColumns {
|
||||
return errWrongNumberOfColumns
|
||||
}
|
||||
|
||||
@@ -964,8 +964,7 @@ func (si *storageIndices) set(dataColumnIndex uint64, position uint8) error {
|
||||
|
||||
// pullChan pulls data column sidecars from the input channel until it is empty.
|
||||
func pullChan(inputRoDataColumns chan []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns)
|
||||
dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, fieldparams.NumberOfColumns)
|
||||
|
||||
for {
|
||||
select {
|
||||
|
||||
@@ -117,8 +117,6 @@ func (sc *dataColumnStorageSummaryCache) HighestEpoch() primitives.Epoch {
|
||||
|
||||
// set updates the cache.
|
||||
func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent) error {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
@@ -127,7 +125,7 @@ func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent)
|
||||
|
||||
count := uint64(0)
|
||||
for _, index := range dataColumnsIdent.Indices {
|
||||
if index >= numberOfColumns {
|
||||
if index >= fieldparams.NumberOfColumns {
|
||||
return errDataColumnIndexOutOfBounds
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
@@ -88,22 +87,6 @@ func TestWarmCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("wrong numbers of columns", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongNumberOfColumns)
|
||||
})
|
||||
|
||||
t.Run("one of the column index is too large", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
|
||||
@@ -128,9 +128,9 @@ type NoHeadAccessDatabase interface {
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailableSlot primitives.Slot) error
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
|
||||
// P2P Metadata operations.
|
||||
SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
|
||||
|
||||
@@ -27,6 +27,9 @@ go_library(
|
||||
"p2p.go",
|
||||
"schema.go",
|
||||
"state.go",
|
||||
"state_diff.go",
|
||||
"state_diff_cache.go",
|
||||
"state_diff_helpers.go",
|
||||
"state_summary.go",
|
||||
"state_summary_cache.go",
|
||||
"utils.go",
|
||||
@@ -41,10 +44,12 @@ go_library(
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/hdiff:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/light-client:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -53,6 +58,7 @@ go_library(
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/progress:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
@@ -98,6 +104,7 @@ go_test(
|
||||
"migration_block_slot_index_test.go",
|
||||
"migration_state_validators_test.go",
|
||||
"p2p_test.go",
|
||||
"state_diff_test.go",
|
||||
"state_summary_test.go",
|
||||
"state_test.go",
|
||||
"utils_test.go",
|
||||
@@ -111,6 +118,7 @@ go_test(
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -120,6 +128,7 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/dbval:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -133,6 +142,7 @@ go_test(
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -146,9 +146,9 @@ func (s *Store) UpdateEarliestAvailableSlot(ctx context.Context, earliestAvailab
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
|
||||
// only if `subscribed` is `true`.
|
||||
// It returns the previous subscription status.
|
||||
// UpdateSubscribedToAllDataSubnets updates whether the node is subscribed to all data subnets (supernode mode).
|
||||
// This is a one-way flag - once set to true, it cannot be reverted to false.
|
||||
// Returns the previous state.
|
||||
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
|
||||
defer span.End()
|
||||
@@ -156,13 +156,11 @@ func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed
|
||||
result := false
|
||||
if !subscribed {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the subscribe all data subnets flag.
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
@@ -181,7 +179,6 @@ func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
|
||||
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
return subscribed
|
||||
}
|
||||
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -274,6 +275,17 @@ func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
require.Equal(t, false, stored)
|
||||
})
|
||||
|
||||
t.Run("initial update with empty database - set to true", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -288,7 +300,7 @@ func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
t.Run("update from true to true (no change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
|
||||
@@ -2,6 +2,13 @@ package kv
|
||||
|
||||
import "bytes"
|
||||
|
||||
func hasPhase0Key(enc []byte) bool {
|
||||
if len(phase0Key) >= len(enc) {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(enc[:len(phase0Key)], phase0Key)
|
||||
}
|
||||
|
||||
// In order for an encoding to be Altair compatible, it must be prefixed with altair key.
|
||||
func hasAltairKey(enc []byte) bool {
|
||||
if len(altairKey) >= len(enc) {
|
||||
|
||||
@@ -91,6 +91,7 @@ type Store struct {
|
||||
blockCache *ristretto.Cache[string, interfaces.ReadOnlySignedBeaconBlock]
|
||||
validatorEntryCache *ristretto.Cache[[]byte, *ethpb.Validator]
|
||||
stateSummaryCache *stateSummaryCache
|
||||
stateDiffCache *stateDiffCache
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
@@ -112,6 +113,7 @@ var Buckets = [][]byte{
|
||||
lightClientUpdatesBucket,
|
||||
lightClientBootstrapBucket,
|
||||
lightClientSyncCommitteeBucket,
|
||||
stateDiffBucket,
|
||||
// Indices buckets.
|
||||
blockSlotIndicesBucket,
|
||||
stateSlotIndicesBucket,
|
||||
@@ -201,6 +203,14 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if features.Get().EnableStateDiff {
|
||||
sdCache, err := newStateDiffCache(kv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kv.stateDiffCache = sdCache
|
||||
}
|
||||
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -216,6 +216,10 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := t.Context()
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
update, err := createUpdate(t, testVersion)
|
||||
require.NoError(t, err)
|
||||
@@ -572,6 +576,10 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
|
||||
require.IsNil(t, retrievedBootstrap)
|
||||
})
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().VersionToForkEpochMap()[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -16,6 +16,7 @@ var (
|
||||
stateValidatorsBucket = []byte("state-validators")
|
||||
feeRecipientBucket = []byte("fee-recipient")
|
||||
registrationBucket = []byte("registration")
|
||||
stateDiffBucket = []byte("state-diff")
|
||||
|
||||
// Light Client Updates Bucket
|
||||
lightClientUpdatesBucket = []byte("light-client-updates")
|
||||
@@ -46,6 +47,7 @@ var (
|
||||
|
||||
// Below keys are used to identify objects are to be fork compatible.
|
||||
// Objects that are only compatible with specific forks should be prefixed with such keys.
|
||||
phase0Key = []byte("phase0")
|
||||
altairKey = []byte("altair")
|
||||
bellatrixKey = []byte("merge")
|
||||
bellatrixBlindKey = []byte("blind-bellatrix")
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/genesis"
|
||||
@@ -28,6 +27,17 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
|
||||
defer span.End()
|
||||
startTime := time.Now()
|
||||
|
||||
// If state diff is enabled, we get the state from the state-diff db.
|
||||
if features.Get().EnableStateDiff {
|
||||
st, err := s.getStateUsingStateDiff(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateReadingTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
return st, nil
|
||||
}
|
||||
|
||||
enc, err := s.stateBytes(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -417,6 +427,16 @@ func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx
|
||||
func (s *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasState")
|
||||
defer span.End()
|
||||
|
||||
if features.Get().EnableStateDiff {
|
||||
hasState, err := s.hasStateUsingStateDiff(ctx, blockRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error checking state existence using state-diff")
|
||||
return false
|
||||
}
|
||||
return hasState
|
||||
}
|
||||
|
||||
hasState := false
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(stateBucket)
|
||||
@@ -470,7 +490,7 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])
|
||||
slot, err := s.SlotByBlockRoot(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -812,50 +832,45 @@ func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, err
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// slotByBlockRoot retrieves the corresponding slot of the input block root.
|
||||
func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []byte) (primitives.Slot, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.slotByBlockRoot")
|
||||
// SlotByBlockRoot returns the slot of the input block root, based on state summary, block, or state.
|
||||
// The fallback to the full state is only attempted if the state-diff feature is not enabled.
|
||||
func (s *Store) SlotByBlockRoot(ctx context.Context, blockRoot [32]byte) (primitives.Slot, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SlotByBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
bkt := tx.Bucket(stateSummaryBucket)
|
||||
enc := bkt.Get(blockRoot)
|
||||
|
||||
if enc == nil {
|
||||
// Fall back to check the block.
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
enc := bkt.Get(blockRoot)
|
||||
|
||||
if enc == nil {
|
||||
// Fallback and check the state.
|
||||
bkt = tx.Bucket(stateBucket)
|
||||
enc = bkt.Get(blockRoot)
|
||||
if enc == nil {
|
||||
return 0, errors.New("state enc can't be nil")
|
||||
}
|
||||
// no need to construct the validator entries as it is not used here.
|
||||
s, err := s.unmarshalState(ctx, enc, nil)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not unmarshal state")
|
||||
}
|
||||
if s == nil || s.IsNil() {
|
||||
return 0, errors.New("state can't be nil")
|
||||
}
|
||||
return s.Slot(), nil
|
||||
}
|
||||
b, err := unmarshalBlock(ctx, enc)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not unmarshal block")
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return b.Block().Slot(), nil
|
||||
// check state summary first
|
||||
stateSummary, err := s.StateSummary(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
stateSummary := ðpb.StateSummary{}
|
||||
if err := decode(ctx, enc, stateSummary); err != nil {
|
||||
return 0, errors.Wrap(err, "could not unmarshal state summary")
|
||||
if stateSummary != nil {
|
||||
return stateSummary.Slot, nil
|
||||
}
|
||||
return stateSummary.Slot, nil
|
||||
|
||||
// fall back to block if state summary is not found
|
||||
blk, err := s.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if blk != nil && !blk.IsNil() {
|
||||
return blk.Block().Slot(), nil
|
||||
}
|
||||
|
||||
// fall back to state, only if state diff feature is not enabled
|
||||
if features.Get().EnableStateDiff {
|
||||
return 0, errors.New("neither state summary nor block found")
|
||||
}
|
||||
|
||||
st, err := s.State(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if st != nil && !st.IsNil() {
|
||||
return st.Slot(), nil
|
||||
}
|
||||
|
||||
// neither state summary, block nor state found
|
||||
return 0, errors.New("neither state summary, block nor state found")
|
||||
}
|
||||
|
||||
// HighestSlotStatesBelow returns the states with the highest slot below the input slot
|
||||
@@ -1031,3 +1046,30 @@ func (s *Store) isStateValidatorMigrationOver() (bool, error) {
|
||||
}
|
||||
return returnFlag, nil
|
||||
}
|
||||
|
||||
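// getStateUsingStateDiff resolves the block root to a slot via SlotByBlockRoot
// and reconstructs the full state for that slot from the state-diff tree.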
func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
|
||||
slot, err := s.SlotByBlockRoot(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
st, err := s.stateByDiff(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
return nil, errors.New("state not found")
|
||||
}
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
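// hasStateUsingStateDiff reports whether the slot of the given block root falls
// on a diff-tree boundary, i.e. whether a state can be reconstructed for it.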
func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte) (bool, error) {
|
||||
slot, err := s.SlotByBlockRoot(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
stateLvl := computeLevel(s.getOffset(), slot)
|
||||
return stateLvl != -1, nil
|
||||
}
|
||||
|
||||
beacon-chain/db/kv/state_diff.go (new file, 232 lines)
@@ -0,0 +1,232 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
const (
|
||||
stateSuffix = "_s"
|
||||
validatorSuffix = "_v"
|
||||
balancesSuffix = "_b"
|
||||
)
|
||||
|
||||
/*
|
||||
We use a level-based approach to save state diffs. Each level corresponds to an exponent of 2 (exponents[lvl]).
|
||||
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
|
||||
*/
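// Illustrative sketch (not part of the change): with the default exponents
// {21, 18, 16, 13, 11, 9, 5} and an offset of 0, computeLevel maps slots to
// levels as exercised by the tests in this change:
//
//	computeLevel(0, 1<<21)    == 0  // level 0: full snapshot
//	computeLevel(0, 1<<18)    == 1  // diff against the level-0 snapshot
//	computeLevel(0, 1<<11)    == 4
//	computeLevel(0, 1<<5)     == 6  // finest-grained diff level
//	computeLevel(0, (1<<5)+1) == -1 // not on any boundary, nothing is saved
//
// Reconstructing a state then means loading the level-0 snapshot and applying
// the chain of diffs from coarser to finer levels.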
|
||||
|
||||
// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
|
||||
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
|
||||
defer span.End()
|
||||
|
||||
if st == nil {
|
||||
return errors.New("state is nil")
|
||||
}
|
||||
|
||||
slot := st.Slot()
|
||||
offset := s.getOffset()
|
||||
if uint64(slot) < offset {
|
||||
return ErrSlotBeforeOffset
|
||||
}
|
||||
|
||||
// Find the level to save the state.
|
||||
lvl := computeLevel(offset, slot)
|
||||
if lvl == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save full state if level is 0.
|
||||
if lvl == 0 {
|
||||
return s.saveFullSnapshot(st)
|
||||
}
|
||||
|
||||
// Get anchor state to compute the diff from.
|
||||
anchorState, err := s.getAnchorState(offset, lvl, slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.saveHdiff(lvl, anchorState, st)
|
||||
}
|
||||
|
||||
// stateByDiff retrieves the full state for a given slot.
|
||||
func (s *Store) stateByDiff(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
|
||||
offset := s.getOffset()
|
||||
if uint64(slot) < offset {
|
||||
return nil, ErrSlotBeforeOffset
|
||||
}
|
||||
|
||||
snapshot, diffChain, err := s.getBaseAndDiffChain(offset, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, diff := range diffChain {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
snapshot, err = hdiff.ApplyDiff(ctx, snapshot, diff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
// saveHdiff computes the diff between the anchor state and the current state and saves it to the database.
|
||||
// This function needs to be called only with the latest finalized state, and in a strictly increasing slot order.
|
||||
func (s *Store) saveHdiff(lvl int, anchor, st state.ReadOnlyBeaconState) error {
|
||||
slot := uint64(st.Slot())
|
||||
key := makeKeyForStateDiffTree(lvl, slot)
|
||||
|
||||
diff, err := hdiff.Diff(anchor, st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bolt.ErrBucketNotFound
|
||||
}
|
||||
buf := append(key, stateSuffix...)
|
||||
if err := bucket.Put(buf, diff.StateDiff); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = append(key, validatorSuffix...)
|
||||
if err := bucket.Put(buf, diff.ValidatorDiffs); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = append(key, balancesSuffix...)
|
||||
if err := bucket.Put(buf, diff.BalancesDiff); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save the full state to the cache (if not the last level).
|
||||
if lvl != len(flags.Get().StateDiffExponents)-1 {
|
||||
err = s.stateDiffCache.setAnchor(lvl, st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveFullSnapshot saves the full level 0 state snapshot to the database.
|
||||
func (s *Store) saveFullSnapshot(st state.ReadOnlyBeaconState) error {
|
||||
slot := uint64(st.Slot())
|
||||
key := makeKeyForStateDiffTree(0, slot)
|
||||
stateBytes, err := st.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// add version key to value
|
||||
enc, err := addKey(st.Version(), stateBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
if err := bucket.Put(key, enc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Save the full state to the cache, and invalidate other levels.
|
||||
s.stateDiffCache.clearAnchors()
|
||||
err = s.stateDiffCache.setAnchor(0, st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
|
||||
key := makeKeyForStateDiffTree(lvl, slot)
|
||||
var stateDiff []byte
|
||||
var validatorDiff []byte
|
||||
var balancesDiff []byte
|
||||
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bolt.ErrBucketNotFound
|
||||
}
|
||||
buf := append(key, stateSuffix...)
|
||||
stateDiff = bucket.Get(buf)
|
||||
if stateDiff == nil {
|
||||
return errors.New("state diff not found")
|
||||
}
|
||||
buf = append(key, validatorSuffix...)
|
||||
validatorDiff = bucket.Get(buf)
|
||||
if validatorDiff == nil {
|
||||
return errors.New("validator diff not found")
|
||||
}
|
||||
buf = append(key, balancesSuffix...)
|
||||
balancesDiff = bucket.Get(buf)
|
||||
if balancesDiff == nil {
|
||||
return errors.New("balances diff not found")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return hdiff.HdiffBytes{}, err
|
||||
}
|
||||
|
||||
return hdiff.HdiffBytes{
|
||||
StateDiff: stateDiff,
|
||||
ValidatorDiffs: validatorDiff,
|
||||
BalancesDiff: balancesDiff,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
|
||||
key := makeKeyForStateDiffTree(0, slot)
|
||||
var enc []byte
|
||||
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bolt.ErrBucketNotFound
|
||||
}
|
||||
enc = bucket.Get(key)
|
||||
if enc == nil {
|
||||
return errors.New("state not found")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return decodeStateSnapshot(enc)
|
||||
}
|
||||
beacon-chain/db/kv/state_diff_cache.go (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
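// stateDiffCache keeps the most recently saved anchor state for every level
// except the last (which never serves as an anchor), together with the slot
// offset of the diff tree, so that diffs can be computed without re-reading
// full states from the database.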
type stateDiffCache struct {
|
||||
sync.RWMutex
|
||||
anchors []state.ReadOnlyBeaconState
|
||||
offset uint64
|
||||
}
|
||||
|
||||
func newStateDiffCache(s *Store) (*stateDiffCache, error) {
|
||||
var offset uint64
|
||||
|
||||
err := s.db.View(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := bucket.Get([]byte("offset"))
|
||||
if offsetBytes == nil {
|
||||
return errors.New("state diff cache: offset not found")
|
||||
}
|
||||
offset = binary.LittleEndian.Uint64(offsetBytes)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stateDiffCache{
|
||||
anchors: make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1), // -1 because last level doesn't need to be cached
|
||||
offset: offset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *stateDiffCache) getAnchor(level int) state.ReadOnlyBeaconState {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return c.anchors[level]
|
||||
}
|
||||
|
||||
func (c *stateDiffCache) setAnchor(level int, anchor state.ReadOnlyBeaconState) error {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if level >= len(c.anchors) || level < 0 {
|
||||
return errors.New("state diff cache: anchor level out of range")
|
||||
}
|
||||
c.anchors[level] = anchor
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *stateDiffCache) getOffset() uint64 {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return c.offset
|
||||
}
|
||||
|
||||
func (c *stateDiffCache) setOffset(offset uint64) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.offset = offset
|
||||
}
|
||||
|
||||
func (c *stateDiffCache) clearAnchors() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.anchors = make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1) // -1 because last level doesn't need to be cached
|
||||
}
|
||||
beacon-chain/db/kv/state_diff_helpers.go (new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/math"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var (
|
||||
offsetKey = []byte("offset")
|
||||
ErrSlotBeforeOffset = errors.New("slot is before root offset")
|
||||
)
|
||||
|
||||
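// makeKeyForStateDiffTree builds the bucket key for a diff-tree entry: byte 0
// holds the level and bytes 1-8 hold the slot in little-endian order; the
// remaining bytes of the 16-byte buffer are left as zero padding.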
func makeKeyForStateDiffTree(level int, slot uint64) []byte {
|
||||
buf := make([]byte, 16)
|
||||
buf[0] = byte(level)
|
||||
binary.LittleEndian.PutUint64(buf[1:], slot)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (anchor state.ReadOnlyBeaconState, err error) {
|
||||
if lvl <= 0 || lvl > len(flags.Get().StateDiffExponents) {
|
||||
return nil, errors.New("invalid value for level")
|
||||
}
|
||||
|
||||
if uint64(slot) < offset {
|
||||
return nil, ErrSlotBeforeOffset
|
||||
}
|
||||
relSlot := uint64(slot) - offset
|
||||
prevExp := flags.Get().StateDiffExponents[lvl-1]
|
||||
if prevExp < 2 || prevExp >= 64 {
|
||||
return nil, fmt.Errorf("state diff exponent %d out of range for uint64", prevExp)
|
||||
}
|
||||
span := math.PowerOf2(uint64(prevExp))
|
||||
anchorSlot := primitives.Slot(uint64(slot) - relSlot%span)
|
||||
|
||||
// anchorLvl can be [0, lvl-1]
|
||||
anchorLvl := computeLevel(offset, anchorSlot)
|
||||
if anchorLvl == -1 {
|
||||
return nil, errors.New("could not compute anchor level")
|
||||
}
|
||||
|
||||
// Check if we have the anchor in cache.
|
||||
anchor = s.stateDiffCache.getAnchor(anchorLvl)
|
||||
if anchor != nil {
|
||||
return anchor, nil
|
||||
}
|
||||
|
||||
// If not, load it from the database.
|
||||
anchor, err = s.stateByDiff(context.Background(), anchorSlot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save it in the cache.
|
||||
err = s.stateDiffCache.setAnchor(anchorLvl, anchor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return anchor, nil
|
||||
}
|
||||
|
||||
// computeLevel computes the level in the diff tree. Returns -1 in case slot should not be in tree.
|
||||
func computeLevel(offset uint64, slot primitives.Slot) int {
|
||||
rel := uint64(slot) - offset
|
||||
for i, exp := range flags.Get().StateDiffExponents {
|
||||
if exp < 2 || exp >= 64 {
|
||||
return -1
|
||||
}
|
||||
span := math.PowerOf2(uint64(exp))
|
||||
if rel%span == 0 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
// If rel isn’t on any of the boundaries, we should ignore saving it.
|
||||
return -1
|
||||
}
|
||||
|
||||
func (s *Store) setOffset(slot primitives.Slot) error {
|
||||
err := s.db.Update(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := bucket.Get(offsetKey)
|
||||
if offsetBytes != nil {
|
||||
return fmt.Errorf("offset already set to %d", binary.LittleEndian.Uint64(offsetBytes))
|
||||
}
|
||||
|
||||
offsetBytes = make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
|
||||
if err := bucket.Put(offsetKey, offsetBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save the offset in the cache.
|
||||
s.stateDiffCache.setOffset(uint64(slot))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) getOffset() uint64 {
|
||||
return s.stateDiffCache.getOffset()
|
||||
}
|
||||
|
||||
func keyForSnapshot(v int) ([]byte, error) {
|
||||
switch v {
|
||||
case version.Fulu:
|
||||
return fuluKey, nil
|
||||
case version.Electra:
|
||||
return ElectraKey, nil
|
||||
case version.Deneb:
|
||||
return denebKey, nil
|
||||
case version.Capella:
|
||||
return capellaKey, nil
|
||||
case version.Bellatrix:
|
||||
return bellatrixKey, nil
|
||||
case version.Altair:
|
||||
return altairKey, nil
|
||||
case version.Phase0:
|
||||
return phase0Key, nil
|
||||
default:
|
||||
return nil, errors.New("unsupported fork")
|
||||
}
|
||||
}
|
||||
|
||||
func addKey(v int, bytes []byte) ([]byte, error) {
|
||||
key, err := keyForSnapshot(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
enc := make([]byte, len(key)+len(bytes))
|
||||
copy(enc, key)
|
||||
copy(enc[len(key):], bytes)
|
||||
return enc, nil
|
||||
}
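
// Note (descriptive, not part of the change): addKey and decodeStateSnapshot
// form a round trip. A level-0 snapshot is stored as forkKey || SSZ(state),
// e.g. addKey(version.Deneb, stateBytes) prefixes denebKey, and
// decodeStateSnapshot dispatches on that prefix and unmarshals the remainder
// into the matching BeaconState type.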
|
||||
|
||||
func decodeStateSnapshot(enc []byte) (state.BeaconState, error) {
|
||||
switch {
|
||||
case hasFuluKey(enc):
|
||||
var fuluState ethpb.BeaconStateFulu
|
||||
if err := fuluState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeFulu(&fuluState)
|
||||
case HasElectraKey(enc):
|
||||
var electraState ethpb.BeaconStateElectra
|
||||
if err := electraState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeElectra(&electraState)
|
||||
case hasDenebKey(enc):
|
||||
var denebState ethpb.BeaconStateDeneb
|
||||
if err := denebState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeDeneb(&denebState)
|
||||
case hasCapellaKey(enc):
|
||||
var capellaState ethpb.BeaconStateCapella
|
||||
if err := capellaState.UnmarshalSSZ(enc[len(capellaKey):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeCapella(&capellaState)
|
||||
case hasBellatrixKey(enc):
|
||||
var bellatrixState ethpb.BeaconStateBellatrix
|
||||
if err := bellatrixState.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeBellatrix(&bellatrixState)
|
||||
case hasAltairKey(enc):
|
||||
var altairState ethpb.BeaconStateAltair
|
||||
if err := altairState.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeAltair(&altairState)
|
||||
case hasPhase0Key(enc):
|
||||
var phase0State ethpb.BeaconState
|
||||
if err := phase0State.UnmarshalSSZ(enc[len(phase0Key):]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafePhase0(&phase0State)
|
||||
default:
|
||||
return nil, errors.New("unsupported fork")
|
||||
}
|
||||
}
|
||||
|
||||
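// getBaseAndDiffChain resolves the level-0 snapshot that anchors the given slot
// and the ordered list of diffs (coarsest level first) that must be applied on
// top of it to reconstruct the state at that slot.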
func (s *Store) getBaseAndDiffChain(offset uint64, slot primitives.Slot) (state.BeaconState, []hdiff.HdiffBytes, error) {
|
||||
if uint64(slot) < offset {
|
||||
return nil, nil, ErrSlotBeforeOffset
|
||||
}
|
||||
rel := uint64(slot) - offset
|
||||
lvl := computeLevel(offset, slot)
|
||||
if lvl == -1 {
|
||||
return nil, nil, errors.New("slot not in tree")
|
||||
}
|
||||
|
||||
exponents := flags.Get().StateDiffExponents
|
||||
|
||||
baseSpan := math.PowerOf2(uint64(exponents[0]))
|
||||
baseAnchorSlot := uint64(slot) - rel%baseSpan
|
||||
|
||||
type diffItem struct {
|
||||
level int
|
||||
slot uint64
|
||||
}
|
||||
|
||||
var diffChainItems []diffItem
|
||||
lastSeenAnchorSlot := baseAnchorSlot
|
||||
for i, exp := range exponents[1 : lvl+1] {
|
||||
span := math.PowerOf2(uint64(exp))
|
||||
diffSlot := rel / span * span
|
||||
if diffSlot == lastSeenAnchorSlot {
|
||||
continue
|
||||
}
|
||||
diffChainItems = append(diffChainItems, diffItem{level: i + 1, slot: diffSlot + offset})
|
||||
lastSeenAnchorSlot = diffSlot
|
||||
}
|
||||
|
||||
baseSnapshot, err := s.getFullSnapshot(baseAnchorSlot)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
diffChain := make([]hdiff.HdiffBytes, 0, len(diffChainItems))
|
||||
for _, item := range diffChainItems {
|
||||
diff, err := s.getDiff(item.level, item.slot)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
diffChain = append(diffChain, diff)
|
||||
}
|
||||
|
||||
return baseSnapshot, diffChain, nil
|
||||
}
|
||||
beacon-chain/db/kv/state_diff_test.go (new file, 662 lines)
@@ -0,0 +1,662 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/math"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
func TestStateDiff_LoadOrInitOffset(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
db := setupDB(t)
|
||||
err := setOffsetInDB(db, 10)
|
||||
require.NoError(t, err)
|
||||
offset := db.getOffset()
|
||||
require.Equal(t, uint64(10), offset)
|
||||
|
||||
err = db.setOffset(10)
|
||||
require.ErrorContains(t, "offset already set", err)
|
||||
offset = db.getOffset()
|
||||
require.Equal(t, uint64(10), offset)
|
||||
}
|
||||
|
||||
func TestStateDiff_ComputeLevel(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
offset := db.getOffset()
|
||||
|
||||
// 2 ** 21
|
||||
lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
|
||||
require.Equal(t, 0, lvl)
|
||||
|
||||
// 2 ** 21 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)*3))
|
||||
require.Equal(t, 0, lvl)
|
||||
|
||||
// 2 ** 18
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(18)))
|
||||
require.Equal(t, 1, lvl)
|
||||
|
||||
// 2 ** 18 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(18)*3))
|
||||
require.Equal(t, 1, lvl)
|
||||
|
||||
// 2 ** 16
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(16)))
|
||||
require.Equal(t, 2, lvl)
|
||||
|
||||
// 2 ** 16 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(16)*3))
|
||||
require.Equal(t, 2, lvl)
|
||||
|
||||
// 2 ** 13
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(13)))
|
||||
require.Equal(t, 3, lvl)
|
||||
|
||||
// 2 ** 13 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(13)*3))
|
||||
require.Equal(t, 3, lvl)
|
||||
|
||||
// 2 ** 11
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(11)))
|
||||
require.Equal(t, 4, lvl)
|
||||
|
||||
// 2 ** 11 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(11)*3))
|
||||
require.Equal(t, 4, lvl)
|
||||
|
||||
// 2 ** 9
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(9)))
|
||||
require.Equal(t, 5, lvl)
|
||||
|
||||
// 2 ** 9 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(9)*3))
|
||||
require.Equal(t, 5, lvl)
|
||||
|
||||
// 2 ** 5
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)))
|
||||
require.Equal(t, 6, lvl)
|
||||
|
||||
// 2 ** 5 * 3
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)*3))
|
||||
require.Equal(t, 6, lvl)
|
||||
|
||||
// 2 ** 7
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(7)))
|
||||
require.Equal(t, 6, lvl)
|
||||
|
||||
// 2 ** 5 + 1
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+1))
|
||||
require.Equal(t, -1, lvl)
|
||||
|
||||
// 2 ** 5 + 16
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+16))
|
||||
require.Equal(t, -1, lvl)
|
||||
|
||||
// 2 ** 5 + 32
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+32))
|
||||
require.Equal(t, 6, lvl)
|
||||
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveFullSnapshot(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// Create state with slot 0
|
||||
st, enc := createState(t, 0, v)
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.db.View(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
s := bucket.Get(makeKeyForStateDiffTree(0, uint64(0)))
|
||||
if s == nil {
|
||||
return bbolt.ErrIncompatibleValue
|
||||
}
|
||||
require.DeepSSZEqual(t, enc, s)
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveAndReadFullSnapshot(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.stateByDiff(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveDiff(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// Create state with slot 2**21
|
||||
slot := primitives.Slot(math.PowerOf2(21))
|
||||
st, enc := createState(t, slot, v)
|
||||
|
||||
err := setOffsetInDB(db, uint64(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.db.View(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
s := bucket.Get(makeKeyForStateDiffTree(0, uint64(slot)))
|
||||
if s == nil {
|
||||
return bbolt.ErrIncompatibleValue
|
||||
}
|
||||
require.DeepSSZEqual(t, enc, s)
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// create state with slot 2**18 (+2**21)
|
||||
slot = primitives.Slot(math.PowerOf2(18) + math.PowerOf2(21))
|
||||
st, _ = createState(t, slot, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
key := makeKeyForStateDiffTree(1, uint64(slot))
|
||||
err = db.db.View(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
buf := append(key, "_s"...)
|
||||
s := bucket.Get(buf)
|
||||
if s == nil {
|
||||
return bbolt.ErrIncompatibleValue
|
||||
}
|
||||
buf = append(key, "_v"...)
|
||||
v := bucket.Get(buf)
|
||||
if v == nil {
|
||||
return bbolt.ErrIncompatibleValue
|
||||
}
|
||||
buf = append(key, "_b"...)
|
||||
b := bucket.Get(buf)
|
||||
if b == nil {
|
||||
return bbolt.ErrIncompatibleValue
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveAndReadDiff(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot := primitives.Slot(math.PowerOf2(5))
|
||||
st, _ = createState(t, slot, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.stateByDiff(context.Background(), slot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveAndReadDiff_WithRepetitiveAnchorSlots(t *testing.T) {
|
||||
globalFlags := flags.GlobalFlags{
|
||||
StateDiffExponents: []int{20, 14, 10, 7, 5},
|
||||
}
|
||||
flags.Init(&globalFlags)
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
err := setOffsetInDB(db, 0)
require.NoError(t, err)

st, _ := createState(t, 0, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot := primitives.Slot(math.PowerOf2(11))
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(5))
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.stateByDiff(context.Background(), slot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveAndReadDiff_MultipleLevels(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot := primitives.Slot(math.PowerOf2(11))
|
||||
st, _ = createState(t, slot, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.stateByDiff(context.Background(), slot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
|
||||
slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(9))
|
||||
st, _ = createState(t, slot, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err = db.stateByDiff(context.Background(), slot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err = st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err = readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
|
||||
slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(9) + math.PowerOf2(5))
|
||||
st, _ = createState(t, slot, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err = db.stateByDiff(context.Background(), slot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err = st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err = readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_SaveAndReadDiffForkTransition(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All()[:len(version.All())-1] {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot := primitives.Slot(math.PowerOf2(5))
|
||||
st, _ = createState(t, slot, v+1)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.stateByDiff(context.Background(), slot)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_OffsetCache(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
// test for slot numbers 0 and 1 for every version
|
||||
for slotNum := range 2 {
|
||||
for v := range version.All() {
|
||||
t.Run(fmt.Sprintf("slotNum=%d,%s", slotNum, version.String(v)), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
slot := primitives.Slot(slotNum)
|
||||
err := setOffsetInDB(db, uint64(slot))
|
||||
require.NoError(t, err)
|
||||
st, _ := createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
offset := db.stateDiffCache.getOffset()
|
||||
require.Equal(t, uint64(slotNum), offset)
|
||||
|
||||
slot2 := primitives.Slot(uint64(slotNum) + math.PowerOf2(uint64(flags.Get().StateDiffExponents[0])))
|
||||
st2, _ := createState(t, slot2, v)
|
||||
err = db.saveStateByDiff(context.Background(), st2)
|
||||
require.NoError(t, err)
|
||||
|
||||
offset = db.stateDiffCache.getOffset()
|
||||
require.Equal(t, uint64(slot), offset)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_AnchorCache(t *testing.T) {
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
exponents := flags.Get().StateDiffExponents
|
||||
localCache := make([]state.ReadOnlyBeaconState, len(exponents)-1)
|
||||
db := setupDB(t)
|
||||
err := setOffsetInDB(db, 0) // lvl 0
|
||||
require.NoError(t, err)
|
||||
|
||||
// at first the cache should be empty
|
||||
for i := 0; i < len(flags.Get().StateDiffExponents)-1; i++ {
|
||||
anchor := db.stateDiffCache.getAnchor(i)
|
||||
require.IsNil(t, anchor)
|
||||
}
|
||||
|
||||
// add level 0
|
||||
slot := primitives.Slot(0) // offset 0 is already set
|
||||
st, _ := createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
localCache[0] = st
|
||||
|
||||
// level 0 should be the same
|
||||
require.DeepEqual(t, localCache[0], db.stateDiffCache.getAnchor(0))
|
||||
|
||||
// rest of the cache should be nil
|
||||
for i := 1; i < len(exponents)-1; i++ {
|
||||
require.IsNil(t, db.stateDiffCache.getAnchor(i))
|
||||
}
|
||||
|
||||
// skip last level as it does not get cached
|
||||
for i := len(exponents) - 2; i > 0; i-- {
|
||||
slot = primitives.Slot(math.PowerOf2(uint64(exponents[i])))
|
||||
st, _ := createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
localCache[i] = st
|
||||
|
||||
// anchor cache must match local cache
|
||||
for i := 0; i < len(exponents)-1; i++ {
|
||||
if localCache[i] == nil {
|
||||
require.IsNil(t, db.stateDiffCache.getAnchor(i))
|
||||
continue
|
||||
}
|
||||
localSSZ, err := localCache[i].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
anchorSSZ, err := db.stateDiffCache.getAnchor(i).MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, localSSZ, anchorSSZ)
|
||||
}
|
||||
}
|
||||
|
||||
// moving to a new tree should invalidate the cache except for level 0
|
||||
twoTo21 := math.PowerOf2(21)
|
||||
slot = primitives.Slot(twoTo21)
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
localCache = make([]state.ReadOnlyBeaconState, len(exponents)-1)
|
||||
localCache[0] = st
|
||||
|
||||
// level 0 should be the same
|
||||
require.DeepEqual(t, localCache[0], db.stateDiffCache.getAnchor(0))
|
||||
|
||||
// rest of the cache should be nil
|
||||
for i := 1; i < len(exponents)-1; i++ {
|
||||
require.IsNil(t, db.stateDiffCache.getAnchor(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateDiff_EncodingAndDecoding(t *testing.T) {
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
st, enc := createState(t, 0, v) // this has addKey called inside
|
||||
stDecoded, err := decodeStateSnapshot(enc)
|
||||
require.NoError(t, err)
|
||||
st1ssz, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
st2ssz, err := stDecoded.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, st1ssz, st2ssz)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func createState(t *testing.T, slot primitives.Slot, v int) (state.ReadOnlyBeaconState, []byte) {
|
||||
p := params.BeaconConfig()
|
||||
var st state.BeaconState
|
||||
var err error
|
||||
switch v {
|
||||
case version.Phase0:
|
||||
st, err = util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.GenesisForkVersion,
|
||||
CurrentVersion: p.GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
case version.Altair:
|
||||
st, err = util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.GenesisForkVersion,
|
||||
CurrentVersion: p.AltairForkVersion,
|
||||
Epoch: p.AltairForkEpoch,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
case version.Bellatrix:
|
||||
st, err = util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.AltairForkVersion,
|
||||
CurrentVersion: p.BellatrixForkVersion,
|
||||
Epoch: p.BellatrixForkEpoch,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
case version.Capella:
|
||||
st, err = util.NewBeaconStateCapella()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.BellatrixForkVersion,
|
||||
CurrentVersion: p.CapellaForkVersion,
|
||||
Epoch: p.CapellaForkEpoch,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
case version.Deneb:
|
||||
st, err = util.NewBeaconStateDeneb()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.CapellaForkVersion,
|
||||
CurrentVersion: p.DenebForkVersion,
|
||||
Epoch: p.DenebForkEpoch,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
case version.Electra:
|
||||
st, err = util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.DenebForkVersion,
|
||||
CurrentVersion: p.ElectraForkVersion,
|
||||
Epoch: p.ElectraForkEpoch,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
case version.Fulu:
|
||||
st, err = util.NewBeaconStateFulu()
|
||||
require.NoError(t, err)
|
||||
err = st.SetFork(ðpb.Fork{
|
||||
PreviousVersion: p.ElectraForkVersion,
|
||||
CurrentVersion: p.FuluForkVersion,
|
||||
Epoch: p.FuluForkEpoch,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
default:
|
||||
t.Fatalf("unsupported version: %d", v)
|
||||
}
|
||||
|
||||
err = st.SetSlot(slot)
|
||||
require.NoError(t, err)
|
||||
slashings := make([]uint64, 8192)
|
||||
slashings[0] = uint64(rand.Intn(10))
|
||||
err = st.SetSlashings(slashings)
|
||||
require.NoError(t, err)
|
||||
stssz, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
enc, err := addKey(v, stssz)
|
||||
require.NoError(t, err)
|
||||
return st, enc
|
||||
}
|
||||
|
||||
func setOffsetInDB(s *Store, offset uint64) error {
|
||||
err := s.db.Update(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := bucket.Get(offsetKey)
|
||||
if offsetBytes != nil {
|
||||
return fmt.Errorf("offset already set to %d", binary.LittleEndian.Uint64(offsetBytes))
|
||||
}
|
||||
|
||||
offsetBytes = make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(offsetBytes, offset)
|
||||
if err := bucket.Put(offsetKey, offsetBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sdCache, err := newStateDiffCache(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.stateDiffCache = sdCache
|
||||
return nil
|
||||
}
|
||||
|
||||
func setDefaultStateDiffExponents() {
|
||||
globalFlags := flags.GlobalFlags{
|
||||
StateDiffExponents: []int{21, 18, 16, 13, 11, 9, 5},
|
||||
}
|
||||
flags.Init(&globalFlags)
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
mathRand "math/rand"
|
||||
@@ -9,6 +10,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
@@ -17,11 +19,14 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/genesis"
|
||||
"github.com/OffchainLabs/prysm/v7/math"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -1329,3 +1334,297 @@ func TestStore_CleanUpDirtyStates_NoOriginRoot(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_CanSaveRetrieveStateUsingStateDiff(t *testing.T) {
|
||||
t.Run("No state summary or block", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), [32]byte{'A'})
|
||||
require.IsNil(t, readSt)
|
||||
require.ErrorContains(t, "neither state summary nor block found", err)
|
||||
})
|
||||
|
||||
t.Run("Slot not in tree", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := bytesutil.ToBytes32([]byte{'A'})
|
||||
ss := ðpb.StateSummary{Slot: 1, Root: r[:]} // slot 1 not in tree
|
||||
err = db.SaveStateSummary(context.Background(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), r)
|
||||
require.ErrorContains(t, "slot not in tree", err)
|
||||
require.IsNil(t, readSt)
|
||||
|
||||
})
|
||||
|
||||
t.Run("State not found", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := bytesutil.ToBytes32([]byte{'A'})
|
||||
ss := ðpb.StateSummary{Slot: 32, Root: r[:]} // slot 32 is in tree
|
||||
err = db.SaveStateSummary(context.Background(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), r)
|
||||
require.ErrorContains(t, "state not found", err)
|
||||
require.IsNil(t, readSt)
|
||||
})
|
||||
|
||||
t.Run("Full state snapshot", func(t *testing.T) {
|
||||
t.Run("using state summary", func(t *testing.T) {
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := bytesutil.ToBytes32([]byte{'A'})
|
||||
ss := ðpb.StateSummary{Slot: 0, Root: r[:]}
|
||||
err = db.SaveStateSummary(context.Background(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("using block", func(t *testing.T) {
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 0
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = db.SaveBlock(context.Background(), signedBlk)
|
||||
require.NoError(t, err)
|
||||
r, err := signedBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Diffed state", func(t *testing.T) {
|
||||
t.Run("using state summary", func(t *testing.T) {
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
exponents := flags.Get().StateDiffExponents
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot := primitives.Slot(math.PowerOf2(uint64(exponents[len(exponents)-2])))
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot = primitives.Slot(math.PowerOf2(uint64(exponents[len(exponents)-2])) + math.PowerOf2(uint64(exponents[len(exponents)-1])))
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := bytesutil.ToBytes32([]byte{'A'})
|
||||
ss := ðpb.StateSummary{Slot: slot, Root: r[:]}
|
||||
err = db.SaveStateSummary(context.Background(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("using block", func(t *testing.T) {
|
||||
for v := range version.All() {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
exponents := flags.Get().StateDiffExponents
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, _ := createState(t, 0, v)
|
||||
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot := primitives.Slot(math.PowerOf2(uint64(exponents[len(exponents)-2])))
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot = primitives.Slot(math.PowerOf2(uint64(exponents[len(exponents)-2])) + math.PowerOf2(uint64(exponents[len(exponents)-1])))
|
||||
st, _ = createState(t, slot, v)
|
||||
err = db.saveStateByDiff(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = slot
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = db.SaveBlock(context.Background(), signedBlk)
|
||||
require.NoError(t, err)
|
||||
r, err := signedBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
readSt, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, readSt)
|
||||
|
||||
stSSZ, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
readStSSZ, err := readSt.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, stSSZ, readStSSZ)
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestStore_HasStateUsingStateDiff(t *testing.T) {
|
||||
t.Run("No state summary or block", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
hasSt := db.HasState(t.Context(), [32]byte{'A'})
|
||||
require.Equal(t, false, hasSt)
|
||||
require.LogsContain(t, hook, "neither state summary nor block found")
|
||||
})
|
||||
|
||||
t.Run("slot in tree or not", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableStateDiff = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
defer reset()
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
slot primitives.Slot
|
||||
expected bool
|
||||
}{
|
||||
{slot: 1, expected: false}, // slot 1 not in tree
|
||||
{slot: 32, expected: true}, // slot 32 in tree
|
||||
{slot: 0, expected: true}, // slot 0 in tree
|
||||
{slot: primitives.Slot(math.PowerOf2(21)), expected: true}, // slot in tree
|
||||
{slot: primitives.Slot(math.PowerOf2(21) - 1), expected: false}, // slot not in tree
|
||||
{slot: primitives.Slot(math.PowerOf2(22)), expected: true}, // slot in tree
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
r := bytesutil.ToBytes32([]byte{'A'})
|
||||
ss := ðpb.StateSummary{Slot: tc.slot, Root: r[:]}
|
||||
err = db.SaveStateSummary(t.Context(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
hasSt := db.HasState(t.Context(), r)
|
||||
require.Equal(t, tc.expected, hasSt)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2683,7 +2683,7 @@ func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.
|
||||
Blob: []byte("0xblob"),
|
||||
KzgProofs: []hexutil.Bytes{},
|
||||
}
|
||||
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
|
||||
for range fieldparams.NumberOfColumns {
|
||||
cellProof := make([]byte, 48)
|
||||
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
|
||||
}
|
||||
|
||||
@@ -33,6 +33,10 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
|
||||
@@ -116,7 +117,7 @@ type BeaconNode struct {
|
||||
GenesisProviders []genesis.Provider
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
@@ -129,6 +130,7 @@ type BeaconNode struct {
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
ConfigOptions []params.Option
|
||||
SyncNeedsWaiter func() (das.SyncNeeds, error)
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -193,7 +195,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
params.LogDigests(params.BeaconConfig())
|
||||
|
||||
synchronizer := startup.NewClockSynchronizer()
|
||||
beacon.clockWaiter = synchronizer
|
||||
beacon.ClockWaiter = synchronizer
|
||||
beacon.forkChoicer = doublylinkedtree.New()
|
||||
|
||||
depositAddress, err := execution.DepositContractAddress()
|
||||
@@ -233,12 +235,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
|
||||
beacon.lhsp = &verification.LazyHeadStateProvider{}
|
||||
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
|
||||
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen, beacon.lhsp)
|
||||
beacon.ClockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen, beacon.lhsp)
|
||||
|
||||
beacon.BackfillOpts = append(
|
||||
beacon.BackfillOpts,
|
||||
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
|
||||
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
|
||||
backfill.WithSyncNeedsWaiter(beacon.SyncNeedsWaiter),
|
||||
)
|
||||
|
||||
if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
|
||||
@@ -280,7 +283,10 @@ func configureBeacon(cliCtx *cli.Context) error {
return errors.Wrap(err, "could not configure beacon chain")
}

flags.ConfigureGlobalFlags(cliCtx)
err := flags.ConfigureGlobalFlags(cliCtx)
if err != nil {
return errors.Wrap(err, "could not configure global flags")
}

if err := configureChainConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure chain config")
||||
@@ -660,7 +666,8 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
StateGen: b.stateGen,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -702,7 +709,7 @@ func (b *BeaconNode) registerSlashingPoolService() error {
|
||||
return err
|
||||
}
|
||||
|
||||
s := slashings.NewPoolService(b.ctx, b.slashingsPool, slashings.WithElectraTimer(b.clockWaiter, chainService.CurrentSlot))
|
||||
s := slashings.NewPoolService(b.ctx, b.slashingsPool, slashings.WithElectraTimer(b.ClockWaiter, chainService.CurrentSlot))
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
@@ -824,7 +831,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
regularsync.WithReconstructor(web3Service),
|
||||
regularsync.WithClockWaiter(b.clockWaiter),
|
||||
regularsync.WithClockWaiter(b.ClockWaiter),
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
@@ -855,7 +862,8 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
P2P: b.fetchP2P(),
|
||||
StateNotifier: b,
|
||||
BlockNotifier: b,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
SyncNeedsWaiter: b.SyncNeedsWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
@@ -886,7 +894,7 @@ func (b *BeaconNode) registerSlasherService() error {
|
||||
SlashingPoolInserter: b.slashingsPool,
|
||||
SyncChecker: syncService,
|
||||
HeadStateFetcher: chainService,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -979,7 +987,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
MaxMsgSize: maxMsgSize,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
@@ -1124,7 +1132,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.DataColumnStorage, b.ClockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
}
|
||||
|
||||
@@ -56,6 +56,7 @@ go_library(
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -153,6 +154,7 @@ go_test(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
@@ -161,6 +163,7 @@ go_test(
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -49,6 +50,7 @@ type Config struct {
|
||||
IPColocationWhitelist []*net.IPNet
|
||||
StateNotifier statefeed.Notifier
|
||||
DB db.ReadOnlyDatabaseWithSeqNum
|
||||
StateGen stategen.StateManager
|
||||
ClockWaiter startup.ClockWaiter
|
||||
}
|
||||
|
||||
|
||||
@@ -156,29 +156,13 @@ func (s *Service) retrieveActiveValidators() (uint64, error) {
if s.activeValidatorCount != 0 {
return s.activeValidatorCount, nil
}
rt := s.cfg.DB.LastArchivedRoot(s.ctx)
if rt == params.BeaconConfig().ZeroHash {
genState, err := s.cfg.DB.GenesisState(s.ctx)
if err != nil {
return 0, err
}
if genState == nil || genState.IsNil() {
return 0, errors.New("no genesis state exists")
}
activeVals, err := helpers.ActiveValidatorCount(context.Background(), genState, coreTime.CurrentEpoch(genState))
if err != nil {
return 0, err
}
// Cache active validator count
s.activeValidatorCount = activeVals
return activeVals, nil
}
bState, err := s.cfg.DB.State(s.ctx, rt)
finalizedCheckpoint, err := s.cfg.DB.FinalizedCheckpoint(s.ctx)
if err != nil {
return 0, err
}
if bState == nil || bState.IsNil() {
return 0, errors.Errorf("no state with root %#x exists", rt)
bState, err := s.cfg.StateGen.StateByRoot(s.ctx, [32]byte(finalizedCheckpoint.Root))
if err != nil {
return 0, err
}
activeVals, err := helpers.ActiveValidatorCount(context.Background(), bState, coreTime.CurrentEpoch(bState))
if err != nil {
||||
@@ -1,10 +1,14 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
iface "github.com/OffchainLabs/prysm/v7/beacon-chain/db/iface"
|
||||
dbutil "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
mockstategen "github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen/mock"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
@@ -20,9 +24,11 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
db := dbutil.SetupDB(t)
|
||||
wrappedDB := &finalizedCheckpointDB{ReadOnlyDatabaseWithSeqNum: db}
|
||||
stateGen := mockstategen.NewService()
|
||||
s := &Service{
|
||||
ctx: t.Context(),
|
||||
cfg: &Config{DB: db},
|
||||
cfg: &Config{DB: wrappedDB, StateGen: stateGen},
|
||||
}
|
||||
bState, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
@@ -39,6 +45,10 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveGenesisData(s.ctx, bState))
|
||||
checkpoint, err := db.FinalizedCheckpoint(s.ctx)
|
||||
require.NoError(t, err)
|
||||
wrappedDB.finalized = checkpoint
|
||||
stateGen.AddStateForRoot(bState, bytesutil.ToBytes32(checkpoint.Root))
|
||||
|
||||
vals, err := s.retrieveActiveValidators()
|
||||
assert.NoError(t, err, "genesis state not retrieved")
|
||||
@@ -52,7 +62,10 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) {
|
||||
}))
|
||||
}
|
||||
require.NoError(t, bState.SetSlot(10000))
|
||||
require.NoError(t, db.SaveState(s.ctx, bState, [32]byte{'a'}))
|
||||
rootA := [32]byte{'a'}
|
||||
require.NoError(t, db.SaveState(s.ctx, bState, rootA))
|
||||
wrappedDB.finalized = ðpb.Checkpoint{Root: rootA[:]}
|
||||
stateGen.AddStateForRoot(bState, rootA)
|
||||
// Reset count
|
||||
s.activeValidatorCount = 0
|
||||
|
||||
@@ -77,3 +90,15 @@ func TestLoggingParameters(_ *testing.T) {
|
||||
logGossipParameters("testing", defaultLightClientOptimisticUpdateTopicParams())
|
||||
logGossipParameters("testing", defaultLightClientFinalityUpdateTopicParams())
|
||||
}
|
||||
|
||||
type finalizedCheckpointDB struct {
|
||||
iface.ReadOnlyDatabaseWithSeqNum
|
||||
finalized *ethpb.Checkpoint
|
||||
}
|
||||
|
||||
func (f *finalizedCheckpointDB) FinalizedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
|
||||
if f.finalized != nil {
|
||||
return f.finalized, nil
|
||||
}
|
||||
return f.ReadOnlyDatabaseWithSeqNum.FinalizedCheckpoint(ctx)
|
||||
}
|
||||
|
||||
@@ -47,6 +47,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
|
||||
@@ -4,11 +4,18 @@ import (
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// StatusProvider describes the minimum capability that Assigner needs from peer status tracking.
// That is, the ability to retrieve the best peers by finalized checkpoint.
type StatusProvider interface {
BestFinalized(ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID)
}

// FinalizedCheckpointer describes the minimum capability that Assigner needs from forkchoice.
// That is, the ability to retrieve the latest finalized checkpoint to help with peer evaluation.
type FinalizedCheckpointer interface {
@@ -17,9 +24,9 @@ type FinalizedCheckpointer interface {

// NewAssigner assists in the correct construction of an Assigner by code in other packages,
// assuring all the important private member fields are given values.
// The FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
// The StatusProvider is used to retrieve best peers, and FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
// Peers that report an older finalized checkpoint are filtered out.
func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {
func NewAssigner(s StatusProvider, fc FinalizedCheckpointer) *Assigner {
return &Assigner{
ps: s,
fc: fc,
@@ -28,7 +35,7 @@ func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {

// Assigner uses the "BestFinalized" peer scoring method to pick the next-best peer to receive rpc requests.
type Assigner struct {
ps *Status
ps StatusProvider
fc FinalizedCheckpointer
}

@@ -38,38 +45,42 @@ type Assigner struct {
var ErrInsufficientSuitable = errors.New("no suitable peers")

func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := min(flags.Get().MinimumSyncPeers, params.BeaconConfig().MaxPeersToSync)
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
required := min(flags.Get().MinimumSyncPeers, min(flags.Get().MinimumSyncPeers, params.BeaconConfig().MaxPeersToSync))
_, peers := a.ps.BestFinalized(a.fc.FinalizedCheckpoint().Epoch)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Warn("Unable to assign peer while suitable peers < required ")
"required": required}).Trace("Unable to assign peer while suitable peers < required")
return nil, ErrInsufficientSuitable
}
return peers, nil
}

// AssignmentFilter describes a function that takes a list of peer.IDs and returns a filtered subset.
// An example is the NotBusy filter.
type AssignmentFilter func([]peer.ID) []peer.ID

// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
func (a *Assigner) Assign(filter AssignmentFilter) ([]peer.ID, error) {
best, err := a.freshPeers()
if err != nil {
return nil, err
}
return pickBest(busy, n, best), nil
return filter(best), nil
}

func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
ps := make([]peer.ID, 0, n)
for _, p := range best {
if len(ps) == n {
return ps
}
if !busy[p] {
ps = append(ps, p)
// NotBusy is a filter that returns the list of peer.IDs that are not in the `busy` map.
func NotBusy(busy map[peer.ID]bool) AssignmentFilter {
return func(peers []peer.ID) []peer.ID {
ps := make([]peer.ID, 0, len(peers))
for _, p := range peers {
if !busy[p] {
ps = append(ps, p)
}
}
return ps
}
return ps
}

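Assign now takes a single AssignmentFilter instead of a busy map and a count, so callers compose the filter and trim the result themselves, as the updated tests later in this compare do with pids[:limit]. A minimal sketch of that calling pattern; the lower-case helper names and the sample peer IDs are illustrative stand-ins, not the package API.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// assignmentFilter mirrors the AssignmentFilter signature from the hunk above.
type assignmentFilter func([]peer.ID) []peer.ID

// notBusy mirrors the NotBusy filter: keep only peers absent from the busy map.
func notBusy(busy map[peer.ID]bool) assignmentFilter {
	return func(peers []peer.ID) []peer.ID {
		out := make([]peer.ID, 0, len(peers))
		for _, p := range peers {
			if !busy[p] {
				out = append(out, p)
			}
		}
		return out
	}
}

func main() {
	best := []peer.ID{"peer-a", "peer-b", "peer-c"}
	busy := map[peer.ID]bool{"peer-b": true}

	// Filter first, then trim to the desired count, since Assign no longer takes n.
	picked := notBusy(busy)(best)
	const n = 1
	if len(picked) > n {
		picked = picked[:n]
	}
	fmt.Println(len(picked), picked[0] == "peer-a") // 1 true
}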
||||
@@ -5,6 +5,8 @@ import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
@@ -14,82 +16,68 @@ func TestPickBest(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
busy map[peer.ID]bool
|
||||
n int
|
||||
best []peer.ID
|
||||
expected []peer.ID
|
||||
}{
|
||||
{
|
||||
name: "",
|
||||
n: 0,
|
||||
name: "don't limit",
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "none busy",
|
||||
n: 1,
|
||||
expected: best[0:1],
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "all busy except last",
|
||||
n: 1,
|
||||
busy: testBusyMap(best[0 : len(best)-1]),
|
||||
expected: best[len(best)-1:],
|
||||
},
|
||||
{
|
||||
name: "all busy except i=5",
|
||||
n: 1,
|
||||
busy: testBusyMap(slices.Concat(best[0:5], best[6:])),
|
||||
expected: []peer.ID{best[5]},
|
||||
},
|
||||
{
|
||||
name: "all busy - 0 results",
|
||||
n: 1,
|
||||
busy: testBusyMap(best),
|
||||
},
|
||||
{
|
||||
name: "first half busy",
|
||||
n: 5,
|
||||
busy: testBusyMap(best[0:5]),
|
||||
expected: best[5:],
|
||||
},
|
||||
{
|
||||
name: "back half busy",
|
||||
n: 5,
|
||||
busy: testBusyMap(best[5:]),
|
||||
expected: best[0:5],
|
||||
},
|
||||
{
|
||||
name: "pick all ",
|
||||
n: 10,
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "none available",
|
||||
n: 10,
|
||||
best: []peer.ID{},
|
||||
},
|
||||
{
|
||||
name: "not enough",
|
||||
n: 10,
|
||||
best: best[0:1],
|
||||
expected: best[0:1],
|
||||
},
|
||||
{
|
||||
name: "not enough, some busy",
|
||||
n: 10,
|
||||
best: best[0:6],
|
||||
busy: testBusyMap(best[0:5]),
|
||||
expected: best[5:6],
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
name := fmt.Sprintf("n=%d", c.n)
|
||||
if c.name != "" {
|
||||
name += " " + c.name
|
||||
}
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
if c.best == nil {
|
||||
c.best = best
|
||||
}
|
||||
pb := pickBest(c.busy, c.n, c.best)
|
||||
filt := NotBusy(c.busy)
|
||||
pb := filt(c.best)
|
||||
require.Equal(t, len(c.expected), len(pb))
|
||||
for i := range c.expected {
|
||||
require.Equal(t, c.expected[i], pb[i])
|
||||
@@ -113,3 +101,310 @@ func testPeerIds(n int) []peer.ID {
|
||||
}
|
||||
return pids
|
||||
}
|
||||
|
||||
// MockStatus is a test mock for the Status interface used in Assigner.
|
||||
type MockStatus struct {
|
||||
bestFinalizedEpoch primitives.Epoch
|
||||
bestPeers []peer.ID
|
||||
}
|
||||
|
||||
func (m *MockStatus) BestFinalized(ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
return m.bestFinalizedEpoch, m.bestPeers
|
||||
}
|
||||
|
||||
// MockFinalizedCheckpointer is a test mock for FinalizedCheckpointer interface.
|
||||
type MockFinalizedCheckpointer struct {
|
||||
checkpoint *forkchoicetypes.Checkpoint
|
||||
}
|
||||
|
||||
func (m *MockFinalizedCheckpointer) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
return m.checkpoint
|
||||
}
|
||||
|
||||
// TestAssign_HappyPath tests the Assign method with sufficient peers and various filters.
|
||||
func TestAssign_HappyPath(t *testing.T) {
|
||||
peers := testPeerIds(10)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
finalizedEpoch primitives.Epoch
|
||||
filter AssignmentFilter
|
||||
expectedCount int
|
||||
}{
|
||||
{
|
||||
name: "sufficient peers with identity filter",
|
||||
bestPeers: peers,
|
||||
finalizedEpoch: 10,
|
||||
filter: func(p []peer.ID) []peer.ID { return p },
|
||||
expectedCount: 10,
|
||||
},
|
||||
{
|
||||
name: "sufficient peers with NotBusy filter (no busy)",
|
||||
bestPeers: peers,
|
||||
finalizedEpoch: 10,
|
||||
filter: NotBusy(make(map[peer.ID]bool)),
|
||||
expectedCount: 10,
|
||||
},
|
||||
{
|
||||
name: "sufficient peers with NotBusy filter (some busy)",
|
||||
bestPeers: peers,
|
||||
finalizedEpoch: 10,
|
||||
filter: NotBusy(testBusyMap(peers[0:5])),
|
||||
expectedCount: 5,
|
||||
},
|
||||
{
|
||||
name: "minimum threshold exactly met",
|
||||
bestPeers: peers[0:5],
|
||||
finalizedEpoch: 10,
|
||||
filter: func(p []peer.ID) []peer.ID { return p },
|
||||
expectedCount: 5,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: tc.finalizedEpoch,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(tc.filter)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("expected %d peers, got %d", tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_InsufficientPeers tests error handling when not enough suitable peers are available.
|
||||
// Note: The actual peer threshold depends on config values MaxPeersToSync and MinimumSyncPeers.
|
||||
func TestAssign_InsufficientPeers(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
expectedErr error
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "exactly at minimum threshold",
|
||||
bestPeers: testPeerIds(5),
|
||||
expectedErr: nil,
|
||||
description: "5 peers should meet the minimum threshold",
|
||||
},
|
||||
{
|
||||
name: "well above minimum threshold",
|
||||
bestPeers: testPeerIds(50),
|
||||
expectedErr: nil,
|
||||
description: "50 peers should easily meet requirements",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: 10,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: 10},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(NotBusy(make(map[peer.ID]bool)))
|
||||
|
||||
if tc.expectedErr != nil {
|
||||
require.NotNil(t, err, tc.description)
|
||||
require.Equal(t, tc.expectedErr, err)
|
||||
} else {
|
||||
require.NoError(t, err, tc.description)
|
||||
require.Equal(t, len(tc.bestPeers), len(result))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_FilterApplication verifies that filters are correctly applied to peer lists.
|
||||
func TestAssign_FilterApplication(t *testing.T) {
|
||||
peers := testPeerIds(10)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
filterToApply AssignmentFilter
|
||||
expectedCount int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "identity filter returns all peers",
|
||||
bestPeers: peers,
|
||||
filterToApply: func(p []peer.ID) []peer.ID { return p },
|
||||
expectedCount: 10,
|
||||
description: "identity filter should not change peer list",
|
||||
},
|
||||
{
|
||||
name: "filter removes all peers (all busy)",
|
||||
bestPeers: peers,
|
||||
filterToApply: NotBusy(testBusyMap(peers)),
|
||||
expectedCount: 0,
|
||||
description: "all peers busy should return empty list",
|
||||
},
|
||||
{
|
||||
name: "filter removes first 5 peers",
|
||||
bestPeers: peers,
|
||||
filterToApply: NotBusy(testBusyMap(peers[0:5])),
|
||||
expectedCount: 5,
|
||||
description: "should only return non-busy peers",
|
||||
},
|
||||
{
|
||||
name: "filter removes last 5 peers",
|
||||
bestPeers: peers,
|
||||
filterToApply: NotBusy(testBusyMap(peers[5:])),
|
||||
expectedCount: 5,
|
||||
description: "should only return non-busy peers from beginning",
|
||||
},
|
||||
{
|
||||
name: "custom filter selects every other peer",
|
||||
bestPeers: peers,
|
||||
filterToApply: func(p []peer.ID) []peer.ID {
|
||||
result := make([]peer.ID, 0)
|
||||
for i := 0; i < len(p); i += 2 {
|
||||
result = append(result, p[i])
|
||||
}
|
||||
return result
|
||||
},
|
||||
expectedCount: 5,
|
||||
description: "custom filter selecting every other peer",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: 10,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: 10},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(tc.filterToApply)
|
||||
|
||||
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("%s: expected %d peers, got %d", tc.description, tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_FinalizedCheckpointUsage verifies that the finalized checkpoint is correctly used.
|
||||
func TestAssign_FinalizedCheckpointUsage(t *testing.T) {
|
||||
peers := testPeerIds(10)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
finalizedEpoch primitives.Epoch
|
||||
bestPeers []peer.ID
|
||||
expectedCount int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "epoch 0",
|
||||
finalizedEpoch: 0,
|
||||
bestPeers: peers,
|
||||
expectedCount: 10,
|
||||
description: "epoch 0 should work",
|
||||
},
|
||||
{
|
||||
name: "epoch 100",
|
||||
finalizedEpoch: 100,
|
||||
bestPeers: peers,
|
||||
expectedCount: 10,
|
||||
description: "high epoch number should work",
|
||||
},
|
||||
{
|
||||
name: "epoch changes between calls",
|
||||
finalizedEpoch: 50,
|
||||
bestPeers: testPeerIds(5),
|
||||
expectedCount: 5,
|
||||
description: "epoch value should be used in checkpoint",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: tc.finalizedEpoch,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(NotBusy(make(map[peer.ID]bool)))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("%s: expected %d peers, got %d", tc.description, tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAssign_EdgeCases tests boundary conditions and edge cases.
|
||||
func TestAssign_EdgeCases(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
bestPeers []peer.ID
|
||||
filter AssignmentFilter
|
||||
expectedCount int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "filter returns empty from sufficient peers",
|
||||
bestPeers: testPeerIds(10),
|
||||
filter: func(p []peer.ID) []peer.ID { return []peer.ID{} },
|
||||
expectedCount: 0,
|
||||
description: "filter can return empty list even if sufficient peers available",
|
||||
},
|
||||
{
|
||||
name: "filter selects subset from sufficient peers",
|
||||
bestPeers: testPeerIds(10),
|
||||
filter: func(p []peer.ID) []peer.ID { return p[0:2] },
|
||||
expectedCount: 2,
|
||||
description: "filter can return subset of available peers",
|
||||
},
|
||||
{
|
||||
name: "filter selects single peer from many",
|
||||
bestPeers: testPeerIds(20),
|
||||
filter: func(p []peer.ID) []peer.ID { return p[0:1] },
|
||||
expectedCount: 1,
|
||||
description: "filter can select single peer from many available",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockStatus := &MockStatus{
|
||||
bestFinalizedEpoch: 10,
|
||||
bestPeers: tc.bestPeers,
|
||||
}
|
||||
mockCheckpointer := &MockFinalizedCheckpointer{
|
||||
checkpoint: &forkchoicetypes.Checkpoint{Epoch: 10},
|
||||
}
|
||||
assigner := NewAssigner(mockStatus, mockCheckpointer)
|
||||
|
||||
result, err := assigner.Assign(tc.filter)
|
||||
|
||||
require.NoError(t, err, fmt.Sprintf("%s: unexpected error: %v", tc.description, err))
|
||||
require.Equal(t, tc.expectedCount, len(result),
|
||||
fmt.Sprintf("%s: expected %d peers, got %d", tc.description, tc.expectedCount, len(result)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -704,76 +704,54 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
// BestFinalized groups all peers by their last known finalized epoch
|
||||
// and selects the epoch of the largest group as best.
|
||||
// Any peer with a finalized epoch < ourFinalized is excluded from consideration.
|
||||
// In the event of a tie in largest group size, the higher epoch is the tie breaker.
|
||||
// The selected epoch is returned, along with a list of peers with a finalized epoch >= the selected epoch.
|
||||
func (p *Status) BestFinalized(ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
connected := p.Connected()
|
||||
pids := make([]peer.ID, 0, len(connected))
|
||||
views := make(map[peer.ID]*pb.StatusV2, len(connected))
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
votes := make(map[primitives.Epoch]uint64)
|
||||
winner := primitives.Epoch(0)
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
view, err := p.ChainState(pid)
|
||||
if err != nil || view == nil || view.FinalizedEpoch < ourFinalized {
|
||||
continue
|
||||
}
|
||||
pids = append(pids, pid)
|
||||
views[pid] = view
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
targetEpoch = epoch
|
||||
votes[view.FinalizedEpoch]++
|
||||
if winner == 0 {
|
||||
winner = view.FinalizedEpoch
|
||||
continue
|
||||
}
|
||||
e, v := view.FinalizedEpoch, votes[view.FinalizedEpoch]
|
||||
if v > votes[winner] || v == votes[winner] && e > winner {
|
||||
winner = e
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
// Descending sort by (finalized, head).
|
||||
sort.Slice(pids, func(i, j int) bool {
|
||||
iv, jv := views[pids[i]], views[pids[j]]
|
||||
if iv.FinalizedEpoch == jv.FinalizedEpoch {
|
||||
return iv.HeadSlot > jv.HeadSlot
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
return iv.FinalizedEpoch > jv.FinalizedEpoch
|
||||
})
|
||||
|
||||
// Trim potential peers to those on or after target epoch.
|
||||
for i, pid := range potentialPIDs {
|
||||
if pidEpoch[pid] < targetEpoch {
|
||||
potentialPIDs = potentialPIDs[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
// Find the first peer with finalized epoch < winner, trim and all following (lower) peers.
|
||||
trim := sort.Search(len(pids), func(i int) bool {
|
||||
return views[pids[i]].FinalizedEpoch < winner
|
||||
})
|
||||
pids = pids[:trim]
|
||||
|
||||
// Trim potential peers to at most maxPeers.
|
||||
if len(potentialPIDs) > maxPeers {
|
||||
potentialPIDs = potentialPIDs[:maxPeers]
|
||||
}
|
||||
|
||||
return targetEpoch, potentialPIDs
|
||||
return winner, pids
|
||||
}
|
||||
|
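The new comment block describes the selection rule: group peers by reported finalized epoch, pick the largest group, and break ties toward the higher epoch. A small self-contained sketch of just that rule on plain integers; the function name and sample data are illustrative, not the package's API.

package main

import "fmt"

// pickWinner returns the epoch with the most votes; on a tie in group size, the higher epoch wins.
func pickWinner(finalizedByPeer []uint64) uint64 {
	votes := make(map[uint64]uint64)
	var winner uint64
	for i, e := range finalizedByPeer {
		votes[e]++
		if i == 0 {
			winner = e
			continue
		}
		if votes[e] > votes[winner] || (votes[e] == votes[winner] && e > winner) {
			winner = e
		}
	}
	return winner
}

func main() {
	// Two peers at epoch 9 and two at epoch 10: the groups tie, so the higher epoch wins.
	fmt.Println(pickWinner([]uint64{9, 10, 9, 10})) // 10
}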
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
|
||||
@@ -654,9 +654,10 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
FinalizedRoot: mockroot2[:],
|
||||
})
|
||||
|
||||
target, pids := p.BestFinalized(maxPeers, 0)
|
||||
target, pids := p.BestFinalized(0)
|
||||
assert.Equal(t, expectedTarget, target, "Incorrect target epoch retrieved")
|
||||
assert.Equal(t, maxPeers, len(pids), "Incorrect number of peers retrieved")
|
||||
// addPeer called 5 times above
|
||||
assert.Equal(t, 5, len(pids), "Incorrect number of peers retrieved")
|
||||
|
||||
// Expect the returned list to be ordered by finalized epoch and trimmed to max peers.
|
||||
assert.Equal(t, pid3, pids[0], "Incorrect first peer")
|
||||
@@ -1017,7 +1018,10 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
HeadSlot: peerConfig.headSlot,
|
||||
})
|
||||
}
|
||||
epoch, pids := p.BestFinalized(tt.limitPeers, tt.ourFinalizedEpoch)
|
||||
epoch, pids := p.BestFinalized(tt.ourFinalizedEpoch)
|
||||
if len(pids) > tt.limitPeers {
|
||||
pids = pids[:tt.limitPeers]
|
||||
}
|
||||
assert.Equal(t, tt.targetEpoch, epoch, "Unexpected epoch retrieved")
|
||||
assert.Equal(t, tt.targetEpochSupport, len(pids), "Unexpected number of peers supporting retrieved epoch")
|
||||
})
|
||||
@@ -1044,7 +1048,10 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
_, pids := p.BestFinalized(maxPeers, 0)
|
||||
_, pids := p.BestFinalized(0)
|
||||
if len(pids) > maxPeers {
|
||||
pids = pids[:maxPeers]
|
||||
}
|
||||
assert.Equal(t, maxPeers, len(pids), "Wrong number of peers returned")
|
||||
}
|
||||
|
||||
|
||||
@@ -77,12 +77,12 @@ func InitializeDataMaps() {
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
return blocks.NewSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockElectra{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}},
|
||||
ðpb.SignedBeaconBlockElectra{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}, ExecutionRequests: &enginev1.ExecutionRequests{}}}},
|
||||
)
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
return blocks.NewSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockFulu{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}},
|
||||
ðpb.SignedBeaconBlockFulu{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}, ExecutionRequests: &enginev1.ExecutionRequests{}}}},
|
||||
)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -73,7 +73,7 @@ func (e *endpoint) handlerWithMiddleware() http.HandlerFunc {
|
||||
handler.ServeHTTP(rw, r)
|
||||
|
||||
if rw.statusCode >= 400 {
|
||||
httpErrorCount.WithLabelValues(r.URL.Path, http.StatusText(rw.statusCode), r.Method).Inc()
|
||||
httpErrorCount.WithLabelValues(e.name, http.StatusText(rw.statusCode), r.Method).Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -993,10 +993,11 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
|
||||
}
|
||||
|
||||
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
commitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob kzg commitments")
|
||||
|
||||
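Elsewhere in this compare the cell proofs are laid out as a single flat slice, blob-major, with cellProofIdx := blobIdx*numberOfColumns + colIdx. A small sketch of that index arithmetic using the fieldparams constant; the helper name is illustrative.

package main

import (
	"fmt"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
)

// cellProofIndex maps (blob, column) into a flat slice laid out blob-major:
// all column proofs for blob 0, then all column proofs for blob 1, and so on.
func cellProofIndex(blobIdx, colIdx int) int {
	return blobIdx*fieldparams.NumberOfColumns + colIdx
}

func main() {
	// Prints 2*NumberOfColumns + 3 (259 when there are 128 columns).
	fmt.Println(cellProofIndex(2, 3))
}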
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -3756,6 +3757,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = fs
|
||||
|
||||
@@ -3783,14 +3785,13 @@ func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate cell proofs for the blobs (flattened format like execution client)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for blobIdx := range blobCount {
|
||||
_, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
for colIdx := range numberOfColumns {
|
||||
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
|
||||
cellProofIdx := blobIdx*numberOfColumns + colIdx
|
||||
cellProofs[cellProofIdx] = proofs[colIdx][:]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,20 +155,19 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MaxAttesterSlashingsElectra = 88
|
||||
config.MaxAttestationsElectra = 89
|
||||
config.MaxWithdrawalRequestsPerPayload = 90
|
||||
config.MaxCellsInExtendedMatrix = 91
|
||||
config.UnsetDepositRequestsStartIndex = 92
|
||||
config.MaxDepositRequestsPerPayload = 93
|
||||
config.MaxPendingDepositsPerEpoch = 94
|
||||
config.MaxBlobCommitmentsPerBlock = 95
|
||||
config.MaxBytesPerTransaction = 96
|
||||
config.MaxExtraDataBytes = 97
|
||||
config.BytesPerLogsBloom = 98
|
||||
config.MaxTransactionsPerPayload = 99
|
||||
config.FieldElementsPerBlob = 100
|
||||
config.KzgCommitmentInclusionProofDepth = 101
|
||||
config.BlobsidecarSubnetCount = 102
|
||||
config.BlobsidecarSubnetCountElectra = 103
|
||||
config.SyncMessageDueBPS = 104
|
||||
config.UnsetDepositRequestsStartIndex = 91
|
||||
config.MaxDepositRequestsPerPayload = 92
|
||||
config.MaxPendingDepositsPerEpoch = 93
|
||||
config.MaxBlobCommitmentsPerBlock = 94
|
||||
config.MaxBytesPerTransaction = 95
|
||||
config.MaxExtraDataBytes = 96
|
||||
config.BytesPerLogsBloom = 97
|
||||
config.MaxTransactionsPerPayload = 98
|
||||
config.FieldElementsPerBlob = 99
|
||||
config.KzgCommitmentInclusionProofDepth = 100
|
||||
config.BlobsidecarSubnetCount = 101
|
||||
config.BlobsidecarSubnetCountElectra = 102
|
||||
config.SyncMessageDueBPS = 103
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -206,7 +205,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 176, len(data))
|
||||
assert.Equal(t, 175, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -500,8 +499,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "1024", v)
|
||||
case "MAX_REQUEST_BLOCKS_DENEB":
|
||||
assert.Equal(t, "128", v)
|
||||
case "NUMBER_OF_COLUMNS":
|
||||
assert.Equal(t, "128", v)
|
||||
case "MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":
|
||||
assert.Equal(t, "128000000000", v)
|
||||
case "MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":
|
||||
@@ -538,14 +535,12 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "89", v)
|
||||
case "MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD":
|
||||
assert.Equal(t, "90", v)
|
||||
case "MAX_CELLS_IN_EXTENDED_MATRIX":
|
||||
assert.Equal(t, "91", v)
|
||||
case "UNSET_DEPOSIT_REQUESTS_START_INDEX":
|
||||
assert.Equal(t, "92", v)
|
||||
assert.Equal(t, "91", v)
|
||||
case "MAX_DEPOSIT_REQUESTS_PER_PAYLOAD":
|
||||
assert.Equal(t, "93", v)
|
||||
assert.Equal(t, "92", v)
|
||||
case "MAX_PENDING_DEPOSITS_PER_EPOCH":
|
||||
assert.Equal(t, "94", v)
|
||||
assert.Equal(t, "93", v)
|
||||
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
|
||||
assert.Equal(t, "9", v)
|
||||
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
|
||||
@@ -563,25 +558,25 @@ func TestGetSpec(t *testing.T) {
|
||||
case "MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS":
|
||||
assert.Equal(t, "4096", v)
|
||||
case "MAX_BLOB_COMMITMENTS_PER_BLOCK":
|
||||
assert.Equal(t, "95", v)
|
||||
assert.Equal(t, "94", v)
|
||||
case "MAX_BYTES_PER_TRANSACTION":
|
||||
assert.Equal(t, "96", v)
|
||||
assert.Equal(t, "95", v)
|
||||
case "MAX_EXTRA_DATA_BYTES":
|
||||
assert.Equal(t, "97", v)
|
||||
assert.Equal(t, "96", v)
|
||||
case "BYTES_PER_LOGS_BLOOM":
|
||||
assert.Equal(t, "98", v)
|
||||
assert.Equal(t, "97", v)
|
||||
case "MAX_TRANSACTIONS_PER_PAYLOAD":
|
||||
assert.Equal(t, "99", v)
|
||||
assert.Equal(t, "98", v)
|
||||
case "FIELD_ELEMENTS_PER_BLOB":
|
||||
assert.Equal(t, "100", v)
|
||||
assert.Equal(t, "99", v)
|
||||
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
|
||||
assert.Equal(t, "101", v)
|
||||
assert.Equal(t, "100", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT":
|
||||
assert.Equal(t, "102", v)
|
||||
assert.Equal(t, "101", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
|
||||
assert.Equal(t, "103", v)
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "104", v)
|
||||
assert.Equal(t, "103", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
blobSchedule, ok := v.([]any)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
@@ -17,6 +17,7 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -308,7 +309,7 @@ func (s *Server) DataColumnSidecars(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// parseDataColumnIndices filters out invalid and duplicate data column indices
|
||||
func parseDataColumnIndices(url *url.URL) ([]int, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
rawIndices := url.Query()["indices"]
|
||||
indices := make([]int, 0, numberOfColumns)
|
||||
invalidIndices := make([]string, 0)
|
||||
|
||||
@@ -709,15 +709,6 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestParseDataColumnIndices(t *testing.T) {
|
||||
// Save the original config
|
||||
originalConfig := params.BeaconConfig()
|
||||
defer func() { params.OverrideBeaconConfig(originalConfig) }()
|
||||
|
||||
// Set NumberOfColumns to 128 for testing
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams map[string][]string
|
||||
|
||||
@@ -47,6 +47,10 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
@@ -178,6 +182,10 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
t.Run("can save retrieve", func(t *testing.T) {
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().VersionToForkEpochMap()[testVersion] * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
@@ -732,6 +740,10 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -827,6 +839,10 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
@@ -99,6 +99,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
|
||||
@@ -835,12 +835,17 @@ func (s *Server) PrepareBeaconProposer(w http.ResponseWriter, r *http.Request) {
s.TrackedValidatorsCache.Set(val)
validatorIndices = append(validatorIndices, primitives.ValidatorIndex(validatorIndex))
}

if len(validatorIndices) == 0 {
return
}
log.WithFields(logrus.Fields{
"validatorIndices": validatorIndices,
}).Info("Updated fee recipient addresses")

log := log.WithField("validatorCount", len(validatorIndices))
if logrus.GetLevel() >= logrus.TraceLevel {
log = log.WithField("validatorIndices", validatorIndices)
}

log.Debug("Updated fee recipient addresses")
}

// GetAttesterDuties requests the beacon node to provide a set of attestation duties,

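The handler now always logs a compact validator count and only attaches the full index list when the log level is trace or higher. A minimal standalone sketch of the same pattern with logrus; the function name and sample indices are illustrative.

package main

import "github.com/sirupsen/logrus"

func logUpdatedFeeRecipients(indices []uint64) {
	log := logrus.WithField("validatorCount", len(indices))
	// Only build and emit the potentially large index list when tracing.
	if logrus.GetLevel() >= logrus.TraceLevel {
		log = log.WithField("validatorIndices", indices)
	}
	log.Debug("Updated fee recipient addresses")
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	logUpdatedFeeRecipients([]uint64{1, 2, 3}) // logs only the count

	logrus.SetLevel(logrus.TraceLevel)
	logUpdatedFeeRecipients([]uint64{1, 2, 3}) // also logs the indices
}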
||||
@@ -44,6 +44,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -2854,6 +2855,8 @@ func TestPrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
|
||||
db := dbutil.SetupDB(t)
|
||||
|
||||
// New validator
|
||||
|
||||
@@ -450,7 +450,7 @@ func (p *BeaconDbBlocker) blobsDataFromStoredDataColumns(root [fieldparams.RootL
|
||||
if count < peerdas.MinimumColumnCountToReconstruct() {
|
||||
// There is no way to reconstruct the data columns.
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed, or retry later if it is already the case", flags.SubscribeAllDataSubnets.Name),
|
||||
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed, or retry later if it is already the case", flags.Supernode.Name),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
@@ -555,7 +555,7 @@ func (p *BeaconDbBlocker) blobSidecarsFromStoredDataColumns(block blocks.ROBlock
|
||||
if count < peerdas.MinimumColumnCountToReconstruct() {
|
||||
// There is no way to reconstruct the data columns.
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed, or retry later if it is already the case", flags.SubscribeAllDataSubnets.Name),
|
||||
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed, or retry later if it is already the case", flags.Supernode.Name),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
@@ -628,6 +628,8 @@ func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]
|
||||
// - no block, 404
|
||||
// - block exists, before Fulu fork, 400 (data columns are not supported before Fulu fork)
|
||||
func (p *BeaconDbBlocker) DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
// Check for genesis block first (not supported for data columns)
|
||||
if id == "genesis" {
|
||||
return nil, &core.RpcError{Err: errors.New("data columns are not supported for Phase 0 fork"), Reason: core.BadRequest}
|
||||
@@ -681,7 +683,6 @@ func (p *BeaconDbBlocker) DataColumns(ctx context.Context, id string, indices []
|
||||
}
|
||||
} else {
|
||||
// Validate and convert indices
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
for _, index := range indices {
|
||||
if index < 0 || uint64(index) >= numberOfColumns {
|
||||
return nil, &core.RpcError{
|
||||
|
||||
@@ -547,11 +547,19 @@ func (vs *Server) PrepareBeaconProposer(
|
||||
vs.TrackedValidatorsCache.Set(val)
|
||||
validatorIndices = append(validatorIndices, r.ValidatorIndex)
|
||||
}
|
||||
if len(validatorIndices) != 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"validatorCount": len(validatorIndices),
|
||||
}).Debug("Updated fee recipient addresses for validator indices")
|
||||
|
||||
if len(validatorIndices) == 0 {
|
||||
return &emptypb.Empty{}, nil
|
||||
|
||||
}
|
||||
|
||||
log := log.WithField("validatorCount", len(validatorIndices))
|
||||
if logrus.GetLevel() >= logrus.TraceLevel {
|
||||
log = log.WithField("validatorIndices", validatorIndices)
|
||||
}
|
||||
|
||||
log.Debug("Updated fee recipient addresses")
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -53,6 +53,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -3162,6 +3163,8 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
proposerServer := &Server{
|
||||
@@ -3178,13 +3181,13 @@ func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
}
|
||||
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses")
|
||||
|
||||
// Same validator
|
||||
hook.Reset()
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses")
|
||||
|
||||
// Same validator with different fee recipient
|
||||
hook.Reset()
|
||||
@@ -3196,7 +3199,7 @@ func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
}
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses")
|
||||
|
||||
// More than one validator
|
||||
hook.Reset()
|
||||
@@ -3209,13 +3212,13 @@ func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
|
||||
}
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses")
|
||||
|
||||
// Same validators
|
||||
hook.Reset()
|
||||
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
|
||||
require.LogsContain(t, hook, "Updated fee recipient addresses")
|
||||
}
|
||||
|
||||
func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
|
||||
|
||||
@@ -23,6 +23,7 @@ go_library(
|
||||
"getters_sync_committee.go",
|
||||
"getters_validator.go",
|
||||
"getters_withdrawal.go",
|
||||
"gloas.go",
|
||||
"hasher.go",
|
||||
"multi_value_slices.go",
|
||||
"proofs.go",
|
||||
|
||||
@@ -70,6 +70,14 @@ type BeaconState struct {
|
||||
pendingConsolidations []*ethpb.PendingConsolidation // pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT]
|
||||
proposerLookahead []primitives.ValidatorIndex // proposer_look_ahead: List[uint64, (MIN_LOOKAHEAD + 1)*SLOTS_PER_EPOCH]
|
||||
|
||||
// Gloas fields
|
||||
latestExecutionPayloadBid *ethpb.ExecutionPayloadBid
|
||||
executionPayloadAvailability []byte
|
||||
builderPendingPayments []*ethpb.BuilderPendingPayment
|
||||
builderPendingWithdrawals []*ethpb.BuilderPendingWithdrawal
|
||||
latestBlockHash []byte
|
||||
latestWithdrawalsRoot []byte
|
||||
|
||||
id uint64
|
||||
lock sync.RWMutex
|
||||
dirtyFields map[types.FieldIndex]bool
|
||||
@@ -125,6 +133,12 @@ type beaconStateMarshalable struct {
|
||||
PendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal `json:"pending_partial_withdrawals" yaml:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*ethpb.PendingConsolidation `json:"pending_consolidations" yaml:"pending_consolidations"`
|
||||
ProposerLookahead []primitives.ValidatorIndex `json:"proposer_look_ahead" yaml:"proposer_look_ahead"`
|
||||
LatestExecutionPayloadBid *ethpb.ExecutionPayloadBid `json:"latest_execution_payload_bid" yaml:"latest_execution_payload_bid"`
|
||||
ExecutionPayloadAvailability []byte `json:"execution_payload_availability" yaml:"execution_payload_availability"`
|
||||
BuilderPendingPayments []*ethpb.BuilderPendingPayment `json:"builder_pending_payments" yaml:"builder_pending_payments"`
|
||||
BuilderPendingWithdrawals []*ethpb.BuilderPendingWithdrawal `json:"builder_pending_withdrawals" yaml:"builder_pending_withdrawals"`
|
||||
LatestBlockHash []byte `json:"latest_block_hash" yaml:"latest_block_hash"`
|
||||
LatestWithdrawalsRoot []byte `json:"latest_withdrawals_root" yaml:"latest_withdrawals_root"`
|
||||
}
|
||||
|
||||
func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
@@ -179,6 +193,12 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawals,
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: b.proposerLookahead,
|
||||
LatestExecutionPayloadBid: b.latestExecutionPayloadBid,
|
||||
ExecutionPayloadAvailability: b.executionPayloadAvailability,
|
||||
BuilderPendingPayments: b.builderPendingPayments,
|
||||
BuilderPendingWithdrawals: b.builderPendingWithdrawals,
|
||||
LatestBlockHash: b.latestBlockHash,
|
||||
LatestWithdrawalsRoot: b.latestWithdrawalsRoot,
|
||||
}
|
||||
return json.Marshal(marshalable)
|
||||
}
|
||||
|
||||
@@ -259,6 +259,57 @@ func (b *BeaconState) ToProtoUnsafe() any {
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: lookahead,
|
||||
}
|
||||
case version.Gloas:
|
||||
lookahead := make([]uint64, len(b.proposerLookahead))
|
||||
for i, v := range b.proposerLookahead {
|
||||
lookahead[i] = uint64(v)
|
||||
}
|
||||
|
||||
return ðpb.BeaconStateGloas{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
Slot: b.slot,
|
||||
Fork: b.fork,
|
||||
LatestBlockHeader: b.latestBlockHeader,
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: b.historicalRoots.Slice(),
|
||||
Eth1Data: b.eth1Data,
|
||||
Eth1DataVotes: b.eth1DataVotes,
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: b.slashings,
|
||||
PreviousEpochParticipation: b.previousEpochParticipation,
|
||||
CurrentEpochParticipation: b.currentEpochParticipation,
|
||||
JustificationBits: b.justificationBits,
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
|
||||
FinalizedCheckpoint: b.finalizedCheckpoint,
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: b.currentSyncCommittee,
|
||||
NextSyncCommittee: b.nextSyncCommittee,
|
||||
LatestExecutionPayloadBid: b.latestExecutionPayloadBid,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: b.consolidationBalanceToConsume,
|
||||
EarliestConsolidationEpoch: b.earliestConsolidationEpoch,
|
||||
PendingDeposits: b.pendingDeposits,
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawals,
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: lookahead,
|
||||
ExecutionPayloadAvailability: b.executionPayloadAvailability,
|
||||
BuilderPendingPayments: b.builderPendingPayments,
|
||||
BuilderPendingWithdrawals: b.builderPendingWithdrawals,
|
||||
LatestBlockHash: b.latestBlockHash,
|
||||
LatestWithdrawalsRoot: b.latestWithdrawalsRoot,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -510,6 +561,57 @@ func (b *BeaconState) ToProto() any {
|
||||
PendingConsolidations: b.pendingConsolidationsVal(),
|
||||
ProposerLookahead: lookahead,
|
||||
}
|
||||
case version.Gloas:
|
||||
lookahead := make([]uint64, len(b.proposerLookahead))
|
||||
for i, v := range b.proposerLookahead {
|
||||
lookahead[i] = uint64(v)
|
||||
}
|
||||
|
||||
return ðpb.BeaconStateGloas{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
Slot: b.slot,
|
||||
Fork: b.forkVal(),
|
||||
LatestBlockHeader: b.latestBlockHeaderVal(),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: b.historicalRoots.Slice(),
|
||||
Eth1Data: b.eth1DataVal(),
|
||||
Eth1DataVotes: b.eth1DataVotesVal(),
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: b.validatorsVal(),
|
||||
Balances: b.balancesVal(),
|
||||
RandaoMixes: rm,
|
||||
Slashings: b.slashingsVal(),
|
||||
PreviousEpochParticipation: b.previousEpochParticipationVal(),
|
||||
CurrentEpochParticipation: b.currentEpochParticipationVal(),
|
||||
JustificationBits: b.justificationBitsVal(),
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpointVal(),
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpointVal(),
|
||||
FinalizedCheckpoint: b.finalizedCheckpointVal(),
|
||||
InactivityScores: b.inactivityScoresVal(),
|
||||
CurrentSyncCommittee: b.currentSyncCommitteeVal(),
|
||||
NextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||
LatestExecutionPayloadBid: b.latestExecutionPayloadBid.Copy(),
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummariesVal(),
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: b.consolidationBalanceToConsume,
|
||||
EarliestConsolidationEpoch: b.earliestConsolidationEpoch,
|
||||
PendingDeposits: b.pendingDepositsVal(),
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawalsVal(),
|
||||
PendingConsolidations: b.pendingConsolidationsVal(),
|
||||
ProposerLookahead: lookahead,
|
||||
ExecutionPayloadAvailability: b.executionPayloadAvailabilityVal(),
|
||||
BuilderPendingPayments: b.builderPendingPaymentsVal(),
|
||||
BuilderPendingWithdrawals: b.builderPendingWithdrawalsVal(),
|
||||
LatestBlockHash: b.latestBlockHashVal(),
|
||||
LatestWithdrawalsRoot: b.latestWithdrawalsRootVal(),
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
74
beacon-chain/state/state-native/gloas.go
Normal file
@@ -0,0 +1,74 @@
package state_native

import (
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// executionPayloadAvailabilityVal returns a copy of the execution payload availability.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) executionPayloadAvailabilityVal() []byte {
	if b.executionPayloadAvailability == nil {
		return nil
	}

	availability := make([]byte, len(b.executionPayloadAvailability))
	copy(availability, b.executionPayloadAvailability)

	return availability
}

// builderPendingPaymentsVal returns a copy of the builder pending payments.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) builderPendingPaymentsVal() []*ethpb.BuilderPendingPayment {
	if b.builderPendingPayments == nil {
		return nil
	}

	payments := make([]*ethpb.BuilderPendingPayment, len(b.builderPendingPayments))
	for i, payment := range b.builderPendingPayments {
		payments[i] = payment.Copy()
	}

	return payments
}

// builderPendingWithdrawalsVal returns a copy of the builder pending withdrawals.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) builderPendingWithdrawalsVal() []*ethpb.BuilderPendingWithdrawal {
	if b.builderPendingWithdrawals == nil {
		return nil
	}

	withdrawals := make([]*ethpb.BuilderPendingWithdrawal, len(b.builderPendingWithdrawals))
	for i, withdrawal := range b.builderPendingWithdrawals {
		withdrawals[i] = withdrawal.Copy()
	}

	return withdrawals
}

// latestBlockHashVal returns a copy of the latest block hash.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) latestBlockHashVal() []byte {
	if b.latestBlockHash == nil {
		return nil
	}

	hash := make([]byte, len(b.latestBlockHash))
	copy(hash, b.latestBlockHash)

	return hash
}

// latestWithdrawalsRootVal returns a copy of the latest withdrawals root.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) latestWithdrawalsRootVal() []byte {
	if b.latestWithdrawalsRoot == nil {
		return nil
	}

	root := make([]byte, len(b.latestWithdrawalsRoot))
	copy(root, b.latestWithdrawalsRoot)

	return root
}
@@ -43,6 +43,8 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateElectraFieldCount)
|
||||
case version.Fulu:
|
||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateFuluFieldCount)
|
||||
case version.Gloas:
|
||||
fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateGloasFieldCount)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown state version %s", version.String(state.version))
|
||||
}
|
||||
@@ -245,7 +247,7 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
fieldRoots[types.LatestExecutionPayloadHeaderCapella.RealPosition()] = executionPayloadRoot[:]
|
||||
}
|
||||
|
||||
if state.version >= version.Deneb {
|
||||
if state.version >= version.Deneb && state.version < version.Gloas {
|
||||
// Execution payload root.
|
||||
executionPayloadRoot, err := state.latestExecutionPayloadHeaderDeneb.HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -254,6 +256,16 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
fieldRoots[types.LatestExecutionPayloadHeaderDeneb.RealPosition()] = executionPayloadRoot[:]
|
||||
}
|
||||
|
||||
if state.version >= version.Gloas {
|
||||
// Execution payload bid root for Gloas.
|
||||
bidRoot, err := state.latestExecutionPayloadBid.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fieldRoots[types.LatestExecutionPayloadBid.RealPosition()] = bidRoot[:]
|
||||
}
|
||||
|
||||
if state.version >= version.Capella {
|
||||
// Next withdrawal index root.
|
||||
nextWithdrawalIndexRoot := make([]byte, 32)
|
||||
@@ -328,5 +340,34 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
}
|
||||
fieldRoots[types.ProposerLookahead.RealPosition()] = proposerLookaheadRoot[:]
|
||||
}
|
||||
|
||||
if state.version >= version.Gloas {
|
||||
epaRoot, err := stateutil.ExecutionPayloadAvailabilityRoot(state.executionPayloadAvailability)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute execution payload availability merkleization")
|
||||
}
|
||||
|
||||
fieldRoots[types.ExecutionPayloadAvailability.RealPosition()] = epaRoot[:]
|
||||
|
||||
bppRoot, err := stateutil.BuilderPendingPaymentsRoot(state.builderPendingPayments)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute builder pending payments merkleization")
|
||||
}
|
||||
|
||||
fieldRoots[types.BuilderPendingPayments.RealPosition()] = bppRoot[:]
|
||||
|
||||
bpwRoot, err := stateutil.BuilderPendingWithdrawalsRoot(state.builderPendingWithdrawals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute builder pending withdrawals merkleization")
|
||||
}
|
||||
|
||||
fieldRoots[types.BuilderPendingWithdrawals.RealPosition()] = bpwRoot[:]
|
||||
|
||||
lbhRoot := bytesutil.ToBytes32(state.latestBlockHash)
|
||||
fieldRoots[types.LatestBlockHash.RealPosition()] = lbhRoot[:]
|
||||
|
||||
lwrRoot := bytesutil.ToBytes32(state.latestWithdrawalsRoot)
|
||||
fieldRoots[types.LatestWithdrawalsRoot.RealPosition()] = lwrRoot[:]
|
||||
}
|
||||
return fieldRoots, nil
|
||||
}
|
||||
|
||||
@@ -79,24 +79,25 @@ var (
|
||||
|
||||
bellatrixFields = append(altairFields, types.LatestExecutionPayloadHeader)
|
||||
|
||||
capellaFields = append(
|
||||
altairFields,
|
||||
types.LatestExecutionPayloadHeaderCapella,
|
||||
withdrawalAndHistoricalSummaryFields = []types.FieldIndex{
|
||||
types.NextWithdrawalIndex,
|
||||
types.NextWithdrawalValidatorIndex,
|
||||
types.HistoricalSummaries,
|
||||
)
|
||||
}
|
||||
|
||||
denebFields = append(
|
||||
capellaFields = slices.Concat(
|
||||
altairFields,
|
||||
types.LatestExecutionPayloadHeaderDeneb,
|
||||
types.NextWithdrawalIndex,
|
||||
types.NextWithdrawalValidatorIndex,
|
||||
types.HistoricalSummaries,
|
||||
[]types.FieldIndex{types.LatestExecutionPayloadHeaderCapella},
|
||||
withdrawalAndHistoricalSummaryFields,
|
||||
)
|
||||
|
||||
electraFields = append(
|
||||
denebFields,
|
||||
denebFields = slices.Concat(
|
||||
altairFields,
|
||||
[]types.FieldIndex{types.LatestExecutionPayloadHeaderDeneb},
|
||||
withdrawalAndHistoricalSummaryFields,
|
||||
)
|
||||
|
||||
electraAdditionalFields = []types.FieldIndex{
|
||||
types.DepositRequestsStartIndex,
|
||||
types.DepositBalanceToConsume,
|
||||
types.ExitBalanceToConsume,
|
||||
@@ -106,12 +107,34 @@ var (
|
||||
types.PendingDeposits,
|
||||
types.PendingPartialWithdrawals,
|
||||
types.PendingConsolidations,
|
||||
}
|
||||
|
||||
electraFields = slices.Concat(
|
||||
denebFields,
|
||||
electraAdditionalFields,
|
||||
)
|
||||
|
||||
fuluFields = append(
|
||||
electraFields,
|
||||
types.ProposerLookahead,
|
||||
)
|
||||
|
||||
gloasAdditionalFields = []types.FieldIndex{
|
||||
types.ExecutionPayloadAvailability,
|
||||
types.BuilderPendingPayments,
|
||||
types.BuilderPendingWithdrawals,
|
||||
types.LatestBlockHash,
|
||||
types.LatestWithdrawalsRoot,
|
||||
}
|
||||
|
||||
gloasFields = slices.Concat(
|
||||
altairFields,
|
||||
[]types.FieldIndex{types.LatestExecutionPayloadBid},
|
||||
withdrawalAndHistoricalSummaryFields,
|
||||
electraAdditionalFields,
|
||||
[]types.FieldIndex{types.ProposerLookahead},
|
||||
gloasAdditionalFields,
|
||||
)
|
||||
)
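
A note on the refactor above: composing the per-fork field lists with slices.Concat (Go 1.22+) rather than chained append has a concrete property worth keeping in mind: Concat always allocates a fresh backing array, so two fork lists built from the same prefix can never alias each other, which chained append can do when the prefix has spare capacity. A minimal standalone sketch, not part of the patch and with arbitrary values:

package main

import (
	"fmt"
	"slices"
)

func main() {
	base := make([]int, 2, 4) // spare capacity makes the append aliasing hazard visible
	base[0], base[1] = 1, 2

	a := append(base, 3)    // reuses base's backing array
	b := append(base, 4)    // also reuses it, overwriting a[2]
	fmt.Println(a[2], b[2]) // prints "4 4": the two appends aliased each other

	c := slices.Concat(base, []int{3}) // always copies into a new array
	d := slices.Concat(base, []int{4})
	fmt.Println(c[2], d[2]) // prints "3 4": no aliasing
}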
|
||||
|
||||
const (
|
||||
@@ -122,6 +145,7 @@ const (
|
||||
denebSharedFieldRefCount = 7
|
||||
electraSharedFieldRefCount = 10
|
||||
fuluSharedFieldRefCount = 11
|
||||
gloasSharedFieldRefCount = 12 // Adds BuilderPendingWithdrawals to the shared-ref set; LatestExecutionPayloadHeader is removed.
|
||||
)
|
||||
|
||||
// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
|
||||
@@ -159,6 +183,11 @@ func InitializeFromProtoFulu(st *ethpb.BeaconStateFulu) (state.BeaconState, erro
|
||||
return InitializeFromProtoUnsafeFulu(proto.Clone(st).(*ethpb.BeaconStateFulu))
|
||||
}
|
||||
|
||||
// InitializeFromProtoGloas the beacon state from a protobuf representation.
|
||||
func InitializeFromProtoGloas(st *ethpb.BeaconStateGloas) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeGloas(proto.Clone(st).(*ethpb.BeaconStateGloas))
|
||||
}
|
||||
|
||||
// InitializeFromProtoUnsafePhase0 directly uses the beacon state protobuf fields
|
||||
// and sets them as fields of the BeaconState type.
|
||||
func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState, error) {
|
||||
@@ -736,6 +765,111 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateFulu) (state.BeaconState
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// InitializeFromProtoUnsafeGloas directly uses the beacon state protobuf fields
|
||||
// and sets them as fields of the BeaconState type.
|
||||
func InitializeFromProtoUnsafeGloas(st *ethpb.BeaconStateGloas) (state.BeaconState, error) {
|
||||
if st == nil {
|
||||
return nil, errors.New("received nil state")
|
||||
}
|
||||
|
||||
hRoots := customtypes.HistoricalRoots(make([][32]byte, len(st.HistoricalRoots)))
|
||||
for i, r := range st.HistoricalRoots {
|
||||
hRoots[i] = bytesutil.ToBytes32(r)
|
||||
}
|
||||
|
||||
proposerLookahead := make([]primitives.ValidatorIndex, len(st.ProposerLookahead))
|
||||
for i, v := range st.ProposerLookahead {
|
||||
proposerLookahead[i] = primitives.ValidatorIndex(v)
|
||||
}
|
||||
|
||||
fieldCount := params.BeaconConfig().BeaconStateGloasFieldCount
|
||||
b := &BeaconState{
|
||||
version: version.Gloas,
|
||||
genesisTime: st.GenesisTime,
|
||||
genesisValidatorsRoot: bytesutil.ToBytes32(st.GenesisValidatorsRoot),
|
||||
slot: st.Slot,
|
||||
fork: st.Fork,
|
||||
latestBlockHeader: st.LatestBlockHeader,
|
||||
historicalRoots: hRoots,
|
||||
eth1Data: st.Eth1Data,
|
||||
eth1DataVotes: st.Eth1DataVotes,
|
||||
eth1DepositIndex: st.Eth1DepositIndex,
|
||||
slashings: st.Slashings,
|
||||
previousEpochParticipation: st.PreviousEpochParticipation,
|
||||
currentEpochParticipation: st.CurrentEpochParticipation,
|
||||
justificationBits: st.JustificationBits,
|
||||
previousJustifiedCheckpoint: st.PreviousJustifiedCheckpoint,
|
||||
currentJustifiedCheckpoint: st.CurrentJustifiedCheckpoint,
|
||||
finalizedCheckpoint: st.FinalizedCheckpoint,
|
||||
currentSyncCommittee: st.CurrentSyncCommittee,
|
||||
nextSyncCommittee: st.NextSyncCommittee,
|
||||
nextWithdrawalIndex: st.NextWithdrawalIndex,
|
||||
nextWithdrawalValidatorIndex: st.NextWithdrawalValidatorIndex,
|
||||
historicalSummaries: st.HistoricalSummaries,
|
||||
depositRequestsStartIndex: st.DepositRequestsStartIndex,
|
||||
depositBalanceToConsume: st.DepositBalanceToConsume,
|
||||
exitBalanceToConsume: st.ExitBalanceToConsume,
|
||||
earliestExitEpoch: st.EarliestExitEpoch,
|
||||
consolidationBalanceToConsume: st.ConsolidationBalanceToConsume,
|
||||
earliestConsolidationEpoch: st.EarliestConsolidationEpoch,
|
||||
pendingDeposits: st.PendingDeposits,
|
||||
pendingPartialWithdrawals: st.PendingPartialWithdrawals,
|
||||
pendingConsolidations: st.PendingConsolidations,
|
||||
proposerLookahead: proposerLookahead,
|
||||
latestExecutionPayloadBid: st.LatestExecutionPayloadBid,
|
||||
executionPayloadAvailability: st.ExecutionPayloadAvailability,
|
||||
builderPendingPayments: st.BuilderPendingPayments,
|
||||
builderPendingWithdrawals: st.BuilderPendingWithdrawals,
|
||||
latestBlockHash: st.LatestBlockHash,
|
||||
latestWithdrawalsRoot: st.LatestWithdrawalsRoot,
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
|
||||
b.blockRootsMultiValue = NewMultiValueBlockRoots(st.BlockRoots)
|
||||
b.stateRootsMultiValue = NewMultiValueStateRoots(st.StateRoots)
|
||||
b.randaoMixesMultiValue = NewMultiValueRandaoMixes(st.RandaoMixes)
|
||||
b.balancesMultiValue = NewMultiValueBalances(st.Balances)
|
||||
b.validatorsMultiValue = NewMultiValueValidators(st.Validators)
|
||||
b.inactivityScoresMultiValue = NewMultiValueInactivityScores(st.InactivityScores)
|
||||
b.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, gloasSharedFieldRefCount)
|
||||
|
||||
for _, f := range gloasFields {
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b.stateFieldLeaves[f] = trie
|
||||
}
|
||||
|
||||
// Initialize field reference tracking for shared data.
|
||||
b.sharedFieldReferences[types.HistoricalRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Eth1DataVotes] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.Slashings] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PreviousEpochParticipationBits] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.HistoricalSummaries] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PendingDeposits] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.ProposerLookahead] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1) // New in Gloas.
|
||||
|
||||
state.Count.Inc()
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(b, finalizerCleanup)
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Copy returns a deep copy of the beacon state.
|
||||
func (b *BeaconState) Copy() state.BeaconState {
|
||||
b.lock.RLock()
|
||||
@@ -757,6 +891,8 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
fieldCount = params.BeaconConfig().BeaconStateElectraFieldCount
|
||||
case version.Fulu:
|
||||
fieldCount = params.BeaconConfig().BeaconStateFuluFieldCount
|
||||
case version.Gloas:
|
||||
fieldCount = params.BeaconConfig().BeaconStateGloasFieldCount
|
||||
}
|
||||
|
||||
dst := &BeaconState{
|
||||
@@ -811,6 +947,12 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
latestExecutionPayloadHeader: b.latestExecutionPayloadHeader.Copy(),
|
||||
latestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella.Copy(),
|
||||
latestExecutionPayloadHeaderDeneb: b.latestExecutionPayloadHeaderDeneb.Copy(),
|
||||
latestExecutionPayloadBid: b.latestExecutionPayloadBid.Copy(),
|
||||
executionPayloadAvailability: b.executionPayloadAvailabilityVal(),
|
||||
builderPendingPayments: b.builderPendingPaymentsVal(),
|
||||
builderPendingWithdrawals: b.builderPendingWithdrawalsVal(),
|
||||
latestBlockHash: b.latestBlockHashVal(),
|
||||
latestWithdrawalsRoot: b.latestWithdrawalsRootVal(),
|
||||
|
||||
id: types.Enumerator.Inc(),
|
||||
|
||||
@@ -847,6 +989,8 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, electraSharedFieldRefCount)
|
||||
case version.Fulu:
|
||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, fuluSharedFieldRefCount)
|
||||
case version.Gloas:
|
||||
dst.sharedFieldReferences = make(map[types.FieldIndex]*stateutil.Reference, gloasSharedFieldRefCount)
|
||||
}
|
||||
|
||||
for field, ref := range b.sharedFieldReferences {
|
||||
@@ -942,6 +1086,8 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateElectraFieldCount)
|
||||
case version.Fulu:
|
||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateFuluFieldCount)
|
||||
case version.Gloas:
|
||||
b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateGloasFieldCount)
|
||||
default:
|
||||
return fmt.Errorf("unknown state version (%s) when computing dirty fields in merklization", version.String(b.version))
|
||||
}
|
||||
@@ -1180,6 +1326,19 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
return stateutil.PendingConsolidationsRoot(b.pendingConsolidations)
|
||||
case types.ProposerLookahead:
|
||||
return stateutil.ProposerLookaheadRoot(b.proposerLookahead)
|
||||
case types.LatestExecutionPayloadBid:
|
||||
return b.latestExecutionPayloadBid.HashTreeRoot()
|
||||
case types.ExecutionPayloadAvailability:
|
||||
return stateutil.ExecutionPayloadAvailabilityRoot(b.executionPayloadAvailability)
|
||||
|
||||
case types.BuilderPendingPayments:
|
||||
return stateutil.BuilderPendingPaymentsRoot(b.builderPendingPayments)
|
||||
case types.BuilderPendingWithdrawals:
|
||||
return stateutil.BuilderPendingWithdrawalsRoot(b.builderPendingWithdrawals)
|
||||
case types.LatestBlockHash:
|
||||
return bytesutil.ToBytes32(b.latestBlockHash), nil
|
||||
case types.LatestWithdrawalsRoot:
|
||||
return bytesutil.ToBytes32(b.latestWithdrawalsRoot), nil
|
||||
}
|
||||
return [32]byte{}, errors.New("invalid field index provided")
|
||||
}
|
||||
|
||||
@@ -88,6 +88,8 @@ func (f FieldIndex) String() string {
|
||||
return "latestExecutionPayloadHeaderCapella"
|
||||
case LatestExecutionPayloadHeaderDeneb:
|
||||
return "latestExecutionPayloadHeaderDeneb"
|
||||
case LatestExecutionPayloadBid:
|
||||
return "latestExecutionPayloadBid"
|
||||
case NextWithdrawalIndex:
|
||||
return "nextWithdrawalIndex"
|
||||
case NextWithdrawalValidatorIndex:
|
||||
@@ -114,6 +116,16 @@ func (f FieldIndex) String() string {
|
||||
return "pendingConsolidations"
|
||||
case ProposerLookahead:
|
||||
return "proposerLookahead"
|
||||
case ExecutionPayloadAvailability:
|
||||
return "executionPayloadAvailability"
|
||||
case BuilderPendingPayments:
|
||||
return "builderPendingPayments"
|
||||
case BuilderPendingWithdrawals:
|
||||
return "builderPendingWithdrawals"
|
||||
case LatestBlockHash:
|
||||
return "latestBlockHash"
|
||||
case LatestWithdrawalsRoot:
|
||||
return "latestWithdrawalsRoot"
|
||||
default:
|
||||
return fmt.Sprintf("unknown field index number: %d", f)
|
||||
}
|
||||
@@ -171,7 +183,7 @@ func (f FieldIndex) RealPosition() int {
|
||||
return 22
|
||||
case NextSyncCommittee:
|
||||
return 23
|
||||
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella, LatestExecutionPayloadHeaderDeneb:
|
||||
case LatestExecutionPayloadHeader, LatestExecutionPayloadHeaderCapella, LatestExecutionPayloadHeaderDeneb, LatestExecutionPayloadBid:
|
||||
return 24
|
||||
case NextWithdrawalIndex:
|
||||
return 25
|
||||
@@ -199,6 +211,16 @@ func (f FieldIndex) RealPosition() int {
|
||||
return 36
|
||||
case ProposerLookahead:
|
||||
return 37
|
||||
case ExecutionPayloadAvailability:
|
||||
return 38
|
||||
case BuilderPendingPayments:
|
||||
return 39
|
||||
case BuilderPendingWithdrawals:
|
||||
return 40
|
||||
case LatestBlockHash:
|
||||
return 41
|
||||
case LatestWithdrawalsRoot:
|
||||
return 42
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
@@ -251,6 +273,7 @@ const (
|
||||
LatestExecutionPayloadHeader
|
||||
LatestExecutionPayloadHeaderCapella
|
||||
LatestExecutionPayloadHeaderDeneb
|
||||
LatestExecutionPayloadBid // Gloas: EIP-7732
|
||||
NextWithdrawalIndex
|
||||
NextWithdrawalValidatorIndex
|
||||
HistoricalSummaries
|
||||
@@ -264,6 +287,11 @@ const (
|
||||
PendingPartialWithdrawals // Electra: EIP-7251
|
||||
PendingConsolidations // Electra: EIP-7251
|
||||
ProposerLookahead // Fulu: EIP-7917
|
||||
ExecutionPayloadAvailability // Gloas: EIP-7732
|
||||
BuilderPendingPayments // Gloas: EIP-7732
|
||||
BuilderPendingWithdrawals // Gloas: EIP-7732
|
||||
LatestBlockHash // Gloas: EIP-7732
|
||||
LatestWithdrawalsRoot // Gloas: EIP-7732
|
||||
)
|
||||
|
||||
// Enumerator keeps track of the number of states created since the node's start.
|
||||
|
||||
@@ -4,7 +4,10 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"block_header_root.go",
|
||||
"builder_pending_payments_root.go",
|
||||
"builder_pending_withdrawals_root.go",
|
||||
"eth1_root.go",
|
||||
"execution_payload_availability_root.go",
|
||||
"field_root_attestation.go",
|
||||
"field_root_eth1.go",
|
||||
"field_root_validator.go",
|
||||
|
||||
@@ -0,0 +1,22 @@
package stateutil

import (
	"github.com/OffchainLabs/prysm/v7/encoding/ssz"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// BuilderPendingPaymentsRoot computes the merkle root of a slice of BuilderPendingPayment.
func BuilderPendingPaymentsRoot(slice []*ethpb.BuilderPendingPayment) ([32]byte, error) {
	roots := make([][32]byte, len(slice))

	for i, payment := range slice {
		r, err := payment.HashTreeRoot()
		if err != nil {
			return [32]byte{}, err
		}

		roots[i] = r
	}

	return ssz.MerkleizeVector(roots, uint64(len(roots))), nil
}
@@ -0,0 +1,12 @@
package stateutil

import (
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/encoding/ssz"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// BuilderPendingWithdrawalsRoot computes the SSZ root of a slice of BuilderPendingWithdrawal.
func BuilderPendingWithdrawalsRoot(slice []*ethpb.BuilderPendingWithdrawal) ([32]byte, error) {
	return ssz.SliceRoot(slice, fieldparams.BuilderPendingWithdrawalsLimit)
}
@@ -0,0 +1,25 @@
package stateutil

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/encoding/ssz"
)

// ExecutionPayloadAvailabilityRoot computes the merkle root of an execution payload availability bitvector.
func ExecutionPayloadAvailabilityRoot(bitvector []byte) ([32]byte, error) {
	chunkCount := (len(bitvector) + 31) / 32
	chunks := make([][32]byte, chunkCount)

	for i := range chunks {
		start := i * 32
		end := min(start+32, len(bitvector))
		copy(chunks[i][:], bitvector[start:end])
	}

	root, err := ssz.BitwiseMerkleize(chunks, uint64(len(chunks)), uint64(len(chunks)))
	if err != nil {
		return [32]byte{}, fmt.Errorf("could not merkleize execution payload availability: %w", err)
	}
	return root, nil
}
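
For reference, a minimal standalone sketch (not part of the patch; packChunks is my own name) of the chunking step used by ExecutionPayloadAvailabilityRoot above: the availability bitvector is packed into 32-byte SSZ chunks, zero-padding the last chunk, before the chunks are merkleized.

package main

import "fmt"

// packChunks mirrors the loop above: copy the bitvector bytes into 32-byte
// chunks, leaving the tail of the final chunk zeroed.
func packChunks(bitvector []byte) [][32]byte {
	chunkCount := (len(bitvector) + 31) / 32
	chunks := make([][32]byte, chunkCount)
	for i := range chunks {
		start := i * 32
		end := min(start+32, len(bitvector))
		copy(chunks[i][:], bitvector[start:end])
	}
	return chunks
}

func main() {
	fmt.Println(len(packChunks(make([]byte, 64)))) // 64 bytes (512 bits) -> 2 chunks
	fmt.Println(len(packChunks(make([]byte, 40)))) // 40 bytes -> still 2 chunks, second one zero-padded
}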
@@ -7,6 +7,7 @@ go_library(
|
||||
"block_batcher.go",
|
||||
"context.go",
|
||||
"custody.go",
|
||||
"data_column_assignment.go",
|
||||
"data_column_sidecars.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"deadlines.go",
|
||||
@@ -135,6 +136,7 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -167,6 +169,7 @@ go_test(
|
||||
"block_batcher_test.go",
|
||||
"context_test.go",
|
||||
"custody_test.go",
|
||||
"data_column_assignment_test.go",
|
||||
"data_column_sidecars_test.go",
|
||||
"data_columns_reconstruct_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
|
||||
@@ -6,17 +6,22 @@ go_library(
|
||||
"batch.go",
|
||||
"batcher.go",
|
||||
"blobs.go",
|
||||
"columns.go",
|
||||
"error.go",
|
||||
"fulu_transition.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"pool.go",
|
||||
"service.go",
|
||||
"status.go",
|
||||
"verify.go",
|
||||
"verify_column.go",
|
||||
"worker.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/backfill",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -37,7 +42,6 @@ go_library(
|
||||
"//proto/dbval:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -51,19 +55,27 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"batch_test.go",
|
||||
"batcher_expiration_test.go",
|
||||
"batcher_test.go",
|
||||
"blobs_test.go",
|
||||
"columns_test.go",
|
||||
"fulu_transition_test.go",
|
||||
"log_test.go",
|
||||
"pool_test.go",
|
||||
"service_test.go",
|
||||
"status_test.go",
|
||||
"verify_column_test.go",
|
||||
"verify_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
@@ -85,5 +97,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -16,9 +15,13 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ErrChainBroken indicates a backfill batch can't be imported to the db because it is not known to be the ancestor
|
||||
// of the canonical chain.
|
||||
var ErrChainBroken = errors.New("batch is not the ancestor of a known finalized root")
|
||||
var errChainBroken = errors.New("batch is not the ancestor of a known finalized root")
|
||||
|
||||
// retryLogMod defines how often retryable errors are logged at debug level instead of trace.
|
||||
const retryLogMod = 5
|
||||
|
||||
// retryDelay defines the delay between retry attempts for a batch.
|
||||
const retryDelay = time.Second
|
||||
|
||||
type batchState int
|
||||
|
||||
@@ -30,16 +33,20 @@ func (s batchState) String() string {
|
||||
return "init"
|
||||
case batchSequenced:
|
||||
return "sequenced"
|
||||
case batchErrRetryable:
|
||||
return "error_retryable"
|
||||
case batchSyncBlobs:
|
||||
return "sync_blobs"
|
||||
case batchSyncColumns:
|
||||
return "sync_columns"
|
||||
case batchImportable:
|
||||
return "importable"
|
||||
case batchImportComplete:
|
||||
return "import_complete"
|
||||
case batchEndSequence:
|
||||
return "end_sequence"
|
||||
case batchBlobSync:
|
||||
return "blob_sync"
|
||||
case batchErrRetryable:
|
||||
return "error_retryable"
|
||||
case batchErrFatal:
|
||||
return "error_fatal"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
@@ -49,15 +56,15 @@ const (
|
||||
batchNil batchState = iota
|
||||
batchInit
|
||||
batchSequenced
|
||||
batchErrRetryable
|
||||
batchBlobSync
|
||||
batchSyncBlobs
|
||||
batchSyncColumns
|
||||
batchImportable
|
||||
batchImportComplete
|
||||
batchErrRetryable
|
||||
batchErrFatal // if this is received in the main loop, the worker pool will be shut down.
|
||||
batchEndSequence
|
||||
)
|
||||
|
||||
var retryDelay = time.Second
|
||||
|
||||
type batchId string
|
||||
|
||||
type batch struct {
|
||||
@@ -67,35 +74,52 @@ type batch struct {
|
||||
retries int
|
||||
retryAfter time.Time
|
||||
begin primitives.Slot
|
||||
end primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
|
||||
results verifiedROBlocks
|
||||
end primitives.Slot // half-open interval, [begin, end), ie >= begin, < end.
|
||||
blocks verifiedROBlocks
|
||||
err error
|
||||
state batchState
|
||||
busy peer.ID
|
||||
blockPid peer.ID
|
||||
blobPid peer.ID
|
||||
bs *blobSync
|
||||
// `assignedPeer` is used by the worker pool to assign and unassign peer.IDs to serve requests for the current batch state.
// Depending on the state it will be copied to blockPeer, columns.peer, or blobs.peer.
|
||||
assignedPeer peer.ID
|
||||
blockPeer peer.ID
|
||||
nextReqCols []uint64
|
||||
blobs *blobSync
|
||||
columns *columnSync
|
||||
}
|
||||
|
||||
func (b batch) logFields() logrus.Fields {
|
||||
f := map[string]any{
|
||||
"batchId": b.id(),
|
||||
"state": b.state.String(),
|
||||
"scheduled": b.scheduled.String(),
|
||||
"seq": b.seq,
|
||||
"retries": b.retries,
|
||||
"begin": b.begin,
|
||||
"end": b.end,
|
||||
"busyPid": b.busy,
|
||||
"blockPid": b.blockPid,
|
||||
"blobPid": b.blobPid,
|
||||
"batchId": b.id(),
|
||||
"state": b.state.String(),
|
||||
"scheduled": b.scheduled.String(),
|
||||
"seq": b.seq,
|
||||
"retries": b.retries,
|
||||
"retryAfter": b.retryAfter.String(),
|
||||
"begin": b.begin,
|
||||
"end": b.end,
|
||||
"busyPid": b.assignedPeer,
|
||||
"blockPid": b.blockPeer,
|
||||
}
|
||||
if b.blobs != nil {
|
||||
f["blobPid"] = b.blobs.peer
|
||||
}
|
||||
if b.columns != nil {
|
||||
f["colPid"] = b.columns.peer
|
||||
}
|
||||
if b.retries > 0 {
|
||||
f["retryAfter"] = b.retryAfter.String()
|
||||
}
|
||||
if b.state == batchSyncColumns {
|
||||
f["nextColumns"] = fmt.Sprintf("%v", b.nextReqCols)
|
||||
}
|
||||
if b.state == batchErrRetryable && b.blobs != nil {
|
||||
f["blobsMissing"] = b.blobs.needed()
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// replaces returns true if `r` is a version of `b` that has been updated by a worker,
|
||||
// meaning it should replace `b` in the batch sequencing queue.
|
||||
func (b batch) replaces(r batch) bool {
|
||||
if r.state == batchImportComplete {
|
||||
return false
|
||||
@@ -114,9 +138,9 @@ func (b batch) id() batchId {
|
||||
}
|
||||
|
||||
func (b batch) ensureParent(expected [32]byte) error {
|
||||
tail := b.results[len(b.results)-1]
|
||||
tail := b.blocks[len(b.blocks)-1]
|
||||
if tail.Root() != expected {
|
||||
return errors.Wrapf(ErrChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
|
||||
return errors.Wrapf(errChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -136,21 +160,15 @@ func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
|
||||
}
|
||||
}
|
||||
|
||||
func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
|
||||
b.results = results
|
||||
b.bs = bs
|
||||
if bs.blobsNeeded() > 0 {
|
||||
return b.withState(batchBlobSync)
|
||||
func (b batch) transitionToNext() batch {
|
||||
if len(b.blocks) == 0 {
|
||||
return b.withState(batchSequenced)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
|
||||
func (b batch) postBlobSync() batch {
|
||||
if b.blobsNeeded() > 0 {
|
||||
log.WithFields(b.logFields()).WithField("blobsMissing", b.blobsNeeded()).Error("Batch still missing blobs after downloading from peer")
|
||||
b.bs = nil
|
||||
b.results = []blocks.ROBlock{}
|
||||
return b.withState(batchErrRetryable)
|
||||
if len(b.columns.columnsNeeded()) > 0 {
|
||||
return b.withState(batchSyncColumns)
|
||||
}
|
||||
if b.blobs != nil && b.blobs.needed() > 0 {
|
||||
return b.withState(batchSyncBlobs)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
@@ -159,44 +177,89 @@ func (b batch) withState(s batchState) batch {
|
||||
if s == batchSequenced {
|
||||
b.scheduled = time.Now()
|
||||
switch b.state {
|
||||
case batchErrRetryable:
|
||||
b.retries += 1
|
||||
b.retryAfter = time.Now().Add(retryDelay)
|
||||
log.WithFields(b.logFields()).Info("Sequencing batch for retry after delay")
|
||||
case batchInit, batchNil:
|
||||
b.firstScheduled = b.scheduled
|
||||
}
|
||||
}
|
||||
if s == batchImportComplete {
|
||||
backfillBatchTimeRoundtrip.Observe(float64(time.Since(b.firstScheduled).Milliseconds()))
|
||||
log.WithFields(b.logFields()).Debug("Backfill batch imported")
|
||||
}
|
||||
b.state = s
|
||||
b.seq += 1
|
||||
return b
|
||||
}
|
||||
|
||||
func (b batch) withPeer(p peer.ID) batch {
|
||||
b.blockPid = p
|
||||
backfillBatchTimeWaiting.Observe(float64(time.Since(b.scheduled).Milliseconds()))
|
||||
return b
|
||||
}
|
||||
|
||||
func (b batch) withRetryableError(err error) batch {
|
||||
b.err = err
|
||||
b.retries += 1
|
||||
b.retryAfter = time.Now().Add(retryDelay)
|
||||
|
||||
msg := "Could not proceed with batch processing due to error"
|
||||
logBase := log.WithFields(b.logFields()).WithError(err)
|
||||
// Log at trace level to limit log noise,
// but escalate to debug level every nth attempt for batches that have some persistent issue.
|
||||
if b.retries&retryLogMod != 0 {
|
||||
logBase.Trace(msg)
|
||||
} else {
|
||||
logBase.Debug(msg)
|
||||
}
|
||||
return b.withState(batchErrRetryable)
|
||||
}
|
||||
|
||||
func (b batch) blobsNeeded() int {
|
||||
return b.bs.blobsNeeded()
|
||||
func (b batch) withFatalError(err error) batch {
|
||||
log.WithFields(b.logFields()).WithError(err).Error("Fatal batch processing error")
|
||||
b.err = err
|
||||
return b.withState(batchErrFatal)
|
||||
}
|
||||
|
||||
func (b batch) blobResponseValidator() sync.BlobResponseValidation {
|
||||
return b.bs.validateNext
|
||||
func (b batch) withError(err error) batch {
|
||||
if isRetryable(err) {
|
||||
return b.withRetryableError(err)
|
||||
}
|
||||
return b.withFatalError(err)
|
||||
}
|
||||
|
||||
func (b batch) availabilityStore() das.AvailabilityStore {
|
||||
return b.bs.store
|
||||
func (b batch) validatingColumnRequest(cb *columnBisector) (*validatingColumnRequest, error) {
|
||||
req, err := b.columns.request(b.nextReqCols, columnRequestLimit)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "columns request")
|
||||
}
|
||||
if req == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return &validatingColumnRequest{
|
||||
req: req,
|
||||
columnSync: b.columns,
|
||||
bisector: cb,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// resetToRetryColumns is called after a partial batch failure. It adds column indices back
|
||||
// to the toDownload structure for any blocks where those columns failed, and resets the bisector state.
|
||||
// Note that this method will also prune any columns that have expired, meaning we no longer need them
|
||||
// per spec and/or our backfill & retention settings.
|
||||
func resetToRetryColumns(b batch, needs das.CurrentNeeds) batch {
|
||||
// return the given batch as-is if it isn't in a state that this func should handle.
|
||||
if b.columns == nil || b.columns.bisector == nil || len(b.columns.bisector.errs) == 0 {
|
||||
return b.transitionToNext()
|
||||
}
|
||||
pruned := make(map[[32]byte]struct{})
|
||||
b.columns.pruneExpired(needs, pruned)
|
||||
|
||||
// Clear out the failed column state in the bisector and add the failed columns back to the toDownload structure.
|
||||
bisector := b.columns.bisector
|
||||
roots := bisector.failingRoots()
|
||||
// Add all the failed columns back to the toDownload structure and reset the bisector state.
|
||||
for _, root := range roots {
|
||||
if _, rm := pruned[root]; rm {
|
||||
continue
|
||||
}
|
||||
bc := b.columns.toDownload[root]
|
||||
bc.remaining.Merge(bisector.failuresFor(root))
|
||||
}
|
||||
b.columns.bisector.reset()
|
||||
|
||||
return b.transitionToNext()
|
||||
}
|
||||
|
||||
var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
|
||||
@@ -223,6 +286,26 @@ func (b batch) waitUntilReady(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b batch) workComplete() bool {
|
||||
return b.state == batchImportable
|
||||
}
|
||||
|
||||
func (b batch) expired(needs das.CurrentNeeds) bool {
|
||||
if !needs.Block.At(b.end - 1) {
|
||||
log.WithFields(b.logFields()).WithField("retentionStartSlot", needs.Block.Begin).Debug("Batch outside retention window")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
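
An illustrative reading of expired(), assuming das.NeedSpan.At uses the usual half-open semantics (Begin <= slot < End): since batch.end is exclusive, the last slot a batch covers is end-1, and the batch expires once that slot falls outside the block retention window. A standalone sketch (batchExpired is my own name, not from the patch):

package main

import "fmt"

// batchExpired mirrors the check above for a batch covering [begin, end),
// given a block retention window [retentionBegin, retentionEnd).
func batchExpired(retentionBegin, retentionEnd, batchEnd uint64) bool {
	lastSlot := batchEnd - 1 // batch.end is exclusive
	return lastSlot < retentionBegin || lastSlot >= retentionEnd
}

func main() {
	fmt.Println(batchExpired(200, 500, 200)) // true: last slot 199 sits before the window
	fmt.Println(batchExpired(200, 500, 251)) // false: last slot 250 sits inside the window
}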
|
||||
|
||||
func (b batch) selectPeer(picker *sync.PeerPicker, busy map[peer.ID]bool) (peer.ID, []uint64, error) {
|
||||
if b.state == batchSyncColumns {
|
||||
return picker.ForColumns(b.columns.columnsNeeded(), busy)
|
||||
}
|
||||
peer, err := picker.ForBlocks(busy)
|
||||
return peer, nil, err
|
||||
}
|
||||
|
||||
func sortBatchDesc(bb []batch) {
|
||||
sort.Slice(bb, func(i, j int) bool {
|
||||
return bb[i].end > bb[j].end
|
||||
|
||||
@@ -24,17 +24,16 @@ func TestSortBatchDesc(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWaitUntilReady(t *testing.T) {
|
||||
b := batch{}.withState(batchErrRetryable)
|
||||
require.Equal(t, time.Time{}, b.retryAfter)
|
||||
var got time.Duration
|
||||
wur := batchBlockUntil
|
||||
|
||||
var got time.Duration
|
||||
var errDerp = errors.New("derp")
|
||||
batchBlockUntil = func(_ context.Context, ur time.Duration, _ batch) error {
|
||||
got = ur
|
||||
return errDerp
|
||||
}
|
||||
// retries counter and timestamp are set when we mark the batch for sequencing, if it is in the retry state
|
||||
b = b.withState(batchSequenced)
|
||||
|
||||
b := batch{}.withRetryableError(errors.New("test error"))
|
||||
require.ErrorIs(t, b.waitUntilReady(t.Context()), errDerp)
|
||||
require.Equal(t, true, retryDelay-time.Until(b.retryAfter) < time.Millisecond)
|
||||
require.Equal(t, true, got < retryDelay && got > retryDelay-time.Millisecond)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -10,8 +11,9 @@ var errEndSequence = errors.New("sequence has terminated, no more backfill batch
|
||||
var errCannotDecreaseMinimum = errors.New("the minimum backfill slot can only be increased, not decreased")
|
||||
|
||||
type batchSequencer struct {
|
||||
batcher batcher
|
||||
seq []batch
|
||||
batcher batcher
|
||||
seq []batch
|
||||
currentNeeds func() das.CurrentNeeds
|
||||
}
|
||||
|
||||
// sequence() is meant as a verb "arrange in a particular order".
|
||||
@@ -19,32 +21,38 @@ type batchSequencer struct {
|
||||
// in its internal view. sequence relies on update() for updates to its view of the
|
||||
// batches it has previously sequenced.
|
||||
func (c *batchSequencer) sequence() ([]batch, error) {
|
||||
needs := c.currentNeeds()
|
||||
s := make([]batch, 0)
|
||||
// batch start slots are in descending order, c.seq[n].begin == c.seq[n+1].end
|
||||
for i := range c.seq {
|
||||
switch c.seq[i].state {
|
||||
case batchInit, batchErrRetryable:
|
||||
c.seq[i] = c.seq[i].withState(batchSequenced)
|
||||
s = append(s, c.seq[i])
|
||||
case batchNil:
|
||||
if c.seq[i].state == batchNil {
|
||||
// batchNil is the zero value of the batch type.
|
||||
// This case means that we are initializing a batch that was created by the
// initial allocation of the seq slice, so the batcher needs to compute its bounds.
|
||||
var b batch
|
||||
if i == 0 {
|
||||
// The first item in the list is a special case, subsequent items are initialized
|
||||
// relative to the preceding batches.
|
||||
b = c.batcher.before(c.batcher.max)
|
||||
c.seq[i] = c.batcher.before(c.batcher.max)
|
||||
} else {
|
||||
b = c.batcher.beforeBatch(c.seq[i-1])
|
||||
c.seq[i] = c.batcher.beforeBatch(c.seq[i-1])
|
||||
}
|
||||
c.seq[i] = b.withState(batchSequenced)
|
||||
s = append(s, c.seq[i])
|
||||
case batchEndSequence:
|
||||
if len(s) == 0 {
|
||||
}
|
||||
if c.seq[i].state == batchInit || c.seq[i].state == batchErrRetryable {
|
||||
// This means the batch has fallen outside the retention window, so we no longer need to sync it.
// Since we always create batches from high to low, we can assume we've already created the
// descendant batches of the batch we're dropping, so there won't be another batch depending on
// this one - we can stop adding batches and put this one in the batchEndSequence state.
// When all batches are in batchEndSequence, the worker pool spins down and marks backfill complete.
|
||||
if c.seq[i].expired(needs) {
|
||||
c.seq[i] = c.seq[i].withState(batchEndSequence)
|
||||
} else {
|
||||
c.seq[i] = c.seq[i].withState(batchSequenced)
|
||||
s = append(s, c.seq[i])
|
||||
continue
|
||||
}
|
||||
default:
|
||||
}
|
||||
if c.seq[i].state == batchEndSequence && len(s) == 0 {
|
||||
s = append(s, c.seq[i])
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -62,6 +70,7 @@ func (c *batchSequencer) sequence() ([]batch, error) {
|
||||
// seq with new batches that are ready to be worked on.
|
||||
func (c *batchSequencer) update(b batch) {
|
||||
done := 0
|
||||
needs := c.currentNeeds()
|
||||
for i := 0; i < len(c.seq); i++ {
|
||||
if b.replaces(c.seq[i]) {
|
||||
c.seq[i] = b
|
||||
@@ -73,16 +82,23 @@ func (c *batchSequencer) update(b batch) {
|
||||
done += 1
|
||||
continue
|
||||
}
|
||||
|
||||
if c.seq[i].expired(needs) {
|
||||
c.seq[i] = c.seq[i].withState(batchEndSequence)
|
||||
done += 1
|
||||
continue
|
||||
}
|
||||
// Move the unfinished batches to overwrite the finished ones.
|
||||
// eg consider [a,b,c,d,e] where a,b are done
|
||||
// when i==2, done==2 (since done was incremented for a and b)
|
||||
// so we want to copy c to a, then on i=3, d to b, then on i=4 e to c.
|
||||
c.seq[i-done] = c.seq[i]
|
||||
}
|
||||
if done == 1 && len(c.seq) == 1 {
|
||||
if done == len(c.seq) {
|
||||
c.seq[0] = c.batcher.beforeBatch(c.seq[0])
|
||||
return
|
||||
}
|
||||
|
||||
// Overwrite the moved batches with the next ones in the sequence.
|
||||
// Continuing the example in the comment above, len(c.seq)==5, done=2, so i=3.
|
||||
// We want to replace index 3 with the batch that should be processed after index 2,
|
||||
@@ -113,18 +129,6 @@ func (c *batchSequencer) importable() []batch {
|
||||
return imp
|
||||
}
|
||||
|
||||
// moveMinimum enables the backfill service to change the slot where the batcher will start replying with
|
||||
// batch state batchEndSequence (signaling that no new batches will be produced). This is done in response to
|
||||
// epochs advancing, which shrinks the gap between <checkpoint slot> and <current slot>-MIN_EPOCHS_FOR_BLOCK_REQUESTS,
|
||||
// allowing the node to download a smaller number of blocks.
|
||||
func (c *batchSequencer) moveMinimum(min primitives.Slot) error {
|
||||
if min < c.batcher.min {
|
||||
return errCannotDecreaseMinimum
|
||||
}
|
||||
c.batcher.min = min
|
||||
return nil
|
||||
}
|
||||
|
||||
// countWithState provides a view into how many batches are in a particular state
|
||||
// to be used for logging or metrics purposes.
|
||||
func (c *batchSequencer) countWithState(s batchState) int {
|
||||
@@ -158,23 +162,24 @@ func (c *batchSequencer) numTodo() int {
|
||||
return todo
|
||||
}
|
||||
|
||||
func newBatchSequencer(seqLen int, min, max, size primitives.Slot) *batchSequencer {
|
||||
b := batcher{min: min, max: max, size: size}
|
||||
func newBatchSequencer(seqLen int, max, size primitives.Slot, needsCb func() das.CurrentNeeds) *batchSequencer {
|
||||
b := batcher{currentNeeds: needsCb, max: max, size: size}
|
||||
seq := make([]batch, seqLen)
|
||||
return &batchSequencer{batcher: b, seq: seq}
|
||||
return &batchSequencer{batcher: b, seq: seq, currentNeeds: needsCb}
|
||||
}
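
A hedged sketch of how the new constructor might be wired, assuming it runs inside the backfill package so das and newBatchSequencer are in scope; the numbers are arbitrary and mirror the usage in the expiration tests further below. The sequencer no longer stores a fixed minimum slot: it re-reads the retention window through the callback each time it sequences or updates batches.

needs := func() das.CurrentNeeds {
	return das.CurrentNeeds{
		Block: das.NeedSpan{Begin: 100, End: 500},
		Blob:  das.NeedSpan{Begin: 100, End: 500},
		Col:   das.NeedSpan{Begin: 100, End: 500},
	}
}
// 4 batches of 64 slots each, working backwards from slot 400.
seq := newBatchSequencer(4, 400, 64, needs)
_ = seq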
|
||||
|
||||
type batcher struct {
|
||||
min primitives.Slot
|
||||
max primitives.Slot
|
||||
size primitives.Slot
|
||||
currentNeeds func() das.CurrentNeeds
|
||||
max primitives.Slot
|
||||
size primitives.Slot
|
||||
}
|
||||
|
||||
func (r batcher) remaining(upTo primitives.Slot) int {
|
||||
if r.min >= upTo {
|
||||
needs := r.currentNeeds()
|
||||
if !needs.Block.At(upTo) {
|
||||
return 0
|
||||
}
|
||||
delta := upTo - r.min
|
||||
delta := upTo - needs.Block.Begin
|
||||
if delta%r.size != 0 {
|
||||
return int(delta/r.size) + 1
|
||||
}
|
||||
@@ -186,13 +191,18 @@ func (r batcher) beforeBatch(upTo batch) batch {
|
||||
}
|
||||
|
||||
func (r batcher) before(upTo primitives.Slot) batch {
|
||||
// upTo is an exclusive upper bound. Requesting a batch before the lower bound of backfill signals the end of the
|
||||
// backfill process.
|
||||
if upTo <= r.min {
|
||||
// upTo is an exclusive upper bound. If we do not need the block at the upTo slot,
|
||||
// we don't have anything left to sync, signaling the end of the backfill process.
|
||||
needs := r.currentNeeds()
|
||||
// The upper bound is exclusive, so we should not return another batch when the previous
// batch's beginning sits exactly at the start of the retention window. In that case
// we've actually hit the end of the sync sequence.
|
||||
if !needs.Block.At(upTo) || needs.Block.Begin == upTo {
|
||||
return batch{begin: upTo, end: upTo, state: batchEndSequence}
|
||||
}
|
||||
begin := r.min
|
||||
if upTo > r.size+r.min {
|
||||
|
||||
begin := needs.Block.Begin
|
||||
if upTo > r.size+needs.Block.Begin {
|
||||
begin = upTo - r.size
|
||||
}
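
To make the bound arithmetic above concrete, a standalone sketch (beforeBounds is my own name, not from the patch; it only models the lower edge of the retention window): batches are carved backwards from the exclusive bound upTo in size-slot steps and clamped so they never start before the retention window.

package main

import "fmt"

// beforeBounds mirrors the computation above: the batch before the exclusive
// bound upTo covers [begin, upTo), clamped to the retention start, and the
// sequence ends once upTo reaches the retention start itself.
func beforeBounds(retentionStart, size, upTo uint64) (begin, end uint64, done bool) {
	if upTo <= retentionStart {
		// Nothing left inside the retention window: end of the backfill sequence.
		return upTo, upTo, true
	}
	begin = retentionStart
	if upTo > size+retentionStart {
		begin = upTo - size
	}
	return begin, upTo, false
}

func main() {
	fmt.Println(beforeBounds(100, 64, 500)) // 436 500 false
	fmt.Println(beforeBounds(100, 64, 120)) // 100 120 false (clamped to the retention start)
	fmt.Println(beforeBounds(100, 64, 100)) // 100 100 true  (end of sequence)
}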
831
beacon-chain/sync/backfill/batcher_expiration_test.go
Normal file
@@ -0,0 +1,831 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
// dynamicNeeds provides a mutable currentNeeds callback for testing scenarios
|
||||
// where the retention window changes over time.
|
||||
type dynamicNeeds struct {
|
||||
blockBegin primitives.Slot
|
||||
blockEnd primitives.Slot
|
||||
blobBegin primitives.Slot
|
||||
blobEnd primitives.Slot
|
||||
colBegin primitives.Slot
|
||||
colEnd primitives.Slot
|
||||
}
|
||||
|
||||
func newDynamicNeeds(blockBegin, blockEnd primitives.Slot) *dynamicNeeds {
|
||||
return &dynamicNeeds{
|
||||
blockBegin: blockBegin,
|
||||
blockEnd: blockEnd,
|
||||
blobBegin: blockBegin,
|
||||
blobEnd: blockEnd,
|
||||
colBegin: blockBegin,
|
||||
colEnd: blockEnd,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dynamicNeeds) get() das.CurrentNeeds {
|
||||
return das.CurrentNeeds{
|
||||
Block: das.NeedSpan{Begin: d.blockBegin, End: d.blockEnd},
|
||||
		Blob: das.NeedSpan{Begin: d.blobBegin, End: d.blobEnd},
		Col: das.NeedSpan{Begin: d.colBegin, End: d.colEnd},
	}
}

// advance moves the retention window forward by the given number of slots.
func (d *dynamicNeeds) advance(slots primitives.Slot) {
	d.blockBegin += slots
	d.blockEnd += slots
	d.blobBegin += slots
	d.blobEnd += slots
	d.colBegin += slots
	d.colEnd += slots
}

// setBlockBegin sets only the block retention start slot.
func (d *dynamicNeeds) setBlockBegin(begin primitives.Slot) {
	d.blockBegin = begin
}

// ============================================================================
// Category 1: Basic Expiration During sequence()
// ============================================================================

func TestSequenceExpiration_SingleBatchExpires_Init(t *testing.T) {
	// Single batch in batchInit expires when needs.block.begin moves past it
	dn := newDynamicNeeds(100, 500)
	seq := newBatchSequencer(1, 200, 50, dn.get)

	// Initialize batch: [150, 200)
	seq.seq[0] = batch{begin: 150, end: 200, state: batchInit}

	// Move retention window past the batch
	dn.setBlockBegin(200)
	seq.batcher.currentNeeds = dn.get

	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(got))
	require.Equal(t, batchEndSequence, got[0].state)
}

func TestSequenceExpiration_SingleBatchExpires_ErrRetryable(t *testing.T) {
	// Single batch in batchErrRetryable expires when needs change
	dn := newDynamicNeeds(100, 500)
	seq := newBatchSequencer(1, 200, 50, dn.get)

	seq.seq[0] = batch{begin: 150, end: 200, state: batchErrRetryable}

	// Move retention window past the batch
	dn.setBlockBegin(200)
	seq.batcher.currentNeeds = dn.get

	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(got))
	require.Equal(t, batchEndSequence, got[0].state)
}

func TestSequenceExpiration_MultipleBatchesExpire_Partial(t *testing.T) {
	// 4 batches, 2 expire when needs change
	dn := newDynamicNeeds(100, 500)
	seq := newBatchSequencer(4, 400, 50, dn.get)

	// Batches: [350,400), [300,350), [250,300), [200,250)
	seq.seq[0] = batch{begin: 350, end: 400, state: batchInit}
	seq.seq[1] = batch{begin: 300, end: 350, state: batchInit}
	seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
	seq.seq[3] = batch{begin: 200, end: 250, state: batchInit}

	// Move retention to 300 - batches [250,300) and [200,250) should expire
	dn.setBlockBegin(300)
	seq.batcher.currentNeeds = dn.get

	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 2, len(got))

	// First two batches should be sequenced (not expired)
	require.Equal(t, batchSequenced, got[0].state)
	require.Equal(t, primitives.Slot(350), got[0].begin)
	require.Equal(t, batchSequenced, got[1].state)
	require.Equal(t, primitives.Slot(300), got[1].begin)

	// Verify expired batches are marked batchEndSequence in seq
	require.Equal(t, batchEndSequence, seq.seq[2].state)
	require.Equal(t, batchEndSequence, seq.seq[3].state)
}

func TestSequenceExpiration_AllBatchesExpire(t *testing.T) {
	// All batches expire, returns one batchEndSequence
	dn := newDynamicNeeds(100, 500)
	seq := newBatchSequencer(3, 300, 50, dn.get)

	seq.seq[0] = batch{begin: 250, end: 300, state: batchInit}
	seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
	seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}

	// Move retention past all batches
	dn.setBlockBegin(350)
	seq.batcher.currentNeeds = dn.get

	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(got))
	require.Equal(t, batchEndSequence, got[0].state)
}

func TestSequenceExpiration_BatchAtExactBoundary(t *testing.T) {
	// Batch with end == needs.block.begin should expire
	// Because expired() checks !needs.block.at(b.end - 1)
	// If batch.end = 200 and needs.block.begin = 200, then at(199) = false → expired
	dn := newDynamicNeeds(200, 500)
	seq := newBatchSequencer(1, 250, 50, dn.get)

	// Batch [150, 200) - end is exactly at retention start
	seq.seq[0] = batch{begin: 150, end: 200, state: batchInit}

	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(got))
	require.Equal(t, batchEndSequence, got[0].state)
}

func TestSequenceExpiration_BatchJustInsideBoundary(t *testing.T) {
	// Batch with end == needs.block.begin + 1 should NOT expire
	// at(200) with begin=200 returns true
	dn := newDynamicNeeds(200, 500)
	seq := newBatchSequencer(1, 251, 50, dn.get)

	// Batch [200, 251) - end-1 = 250 which is inside [200, 500)
	seq.seq[0] = batch{begin: 200, end: 251, state: batchInit}

	got, err := seq.sequence()
	require.NoError(t, err)
	require.Equal(t, 1, len(got))
	require.Equal(t, batchSequenced, got[0].state)
}
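The two boundary tests above hinge on batches covering half-open slot ranges. As an illustration only (not code from this change), the expiration check they describe could be sketched as below; it assumes das.NeedSpan has an At(slot) method implementing a Begin <= slot < End check, consistent with how needs.Blob.At is used later in this diff.

// expiredSketch reports whether a batch no longer overlaps the needed block
// span: the last slot the batch covers is end-1, so the batch is expired once
// that slot falls outside [Begin, End).
func expiredSketch(needs das.CurrentNeeds, b batch) bool {
	return !needs.Block.At(b.end - 1)
}

// With needs.Block = [200, 500): a batch ending at 200 checks At(199) and expires,
// while a batch ending at 251 checks At(250) and is still sequenced.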
|
||||
// ============================================================================
|
||||
// Category 2: Expiration During update()
|
||||
// ============================================================================
|
||||
|
||||
func TestUpdateExpiration_UpdateCausesExpiration(t *testing.T) {
|
||||
// Update a batch while needs have changed, causing other batches to expire
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchSequenced}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchSequenced}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
// Move retention window
|
||||
dn.setBlockBegin(200)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Update first batch (should still be valid)
|
||||
updated := batch{begin: 250, end: 300, state: batchImportable, seq: 1}
|
||||
seq.update(updated)
|
||||
|
||||
// First batch should be updated
|
||||
require.Equal(t, batchImportable, seq.seq[0].state)
|
||||
|
||||
// Third batch should have expired during update
|
||||
require.Equal(t, batchEndSequence, seq.seq[2].state)
|
||||
}
|
||||
|
||||
func TestUpdateExpiration_MultipleExpireDuringUpdate(t *testing.T) {
|
||||
// Several batches expire when needs advance significantly
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(4, 400, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchSequenced}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchSequenced}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
|
||||
seq.seq[3] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
// Move retention to expire last two batches
|
||||
dn.setBlockBegin(300)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Update first batch
|
||||
updated := batch{begin: 350, end: 400, state: batchImportable, seq: 1}
|
||||
seq.update(updated)
|
||||
|
||||
// Check that expired batches are marked
|
||||
require.Equal(t, batchEndSequence, seq.seq[2].state)
|
||||
require.Equal(t, batchEndSequence, seq.seq[3].state)
|
||||
}
|
||||
|
||||
func TestUpdateExpiration_UpdateCompleteWhileExpiring(t *testing.T) {
|
||||
// Mark batch complete while other batches expire
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchImportable}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchSequenced}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
// Move retention to expire last batch
|
||||
dn.setBlockBegin(200)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Mark first batch complete
|
||||
completed := batch{begin: 250, end: 300, state: batchImportComplete, seq: 1}
|
||||
seq.update(completed)
|
||||
|
||||
// Completed batch removed, third batch should have expired
|
||||
// Check that we still have 3 batches (shifted + new ones added)
|
||||
require.Equal(t, 3, len(seq.seq))
|
||||
|
||||
// The batch that was at index 2 should now be expired
|
||||
foundExpired := false
|
||||
for _, b := range seq.seq {
|
||||
if b.state == batchEndSequence {
|
||||
foundExpired = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundExpired, "should have an expired batch")
|
||||
}
|
||||
|
||||
func TestUpdateExpiration_ExpiredBatchNotShiftedIncorrectly(t *testing.T) {
|
||||
// Verify expired batches don't get incorrectly shifted
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchImportComplete}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
// Move retention to expire all remaining init batches
|
||||
dn.setBlockBegin(250)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Update with the completed batch
|
||||
completed := batch{begin: 250, end: 300, state: batchImportComplete, seq: 1}
|
||||
seq.update(completed)
|
||||
|
||||
// Verify sequence integrity
|
||||
require.Equal(t, 3, len(seq.seq))
|
||||
}
|
||||
|
||||
func TestUpdateExpiration_NewBatchCreatedRespectsNeeds(t *testing.T) {
|
||||
// When new batch is created after expiration, it should respect current needs
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(2, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchImportable}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
// Mark first batch complete to trigger new batch creation
|
||||
completed := batch{begin: 250, end: 300, state: batchImportComplete, seq: 1}
|
||||
seq.update(completed)
|
||||
|
||||
// New batch should be created - verify it respects the needs
|
||||
require.Equal(t, 2, len(seq.seq))
|
||||
// New batch should have proper bounds
|
||||
for _, b := range seq.seq {
|
||||
if b.state == batchNil {
|
||||
continue
|
||||
}
|
||||
require.Equal(t, true, b.begin < b.end, "batch bounds should be valid")
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Category 3: Progressive Slot Advancement
|
||||
// ============================================================================
|
||||
|
||||
func TestProgressiveAdvancement_SlotAdvancesGradually(t *testing.T) {
|
||||
// Simulate gradual slot advancement with batches expiring one by one
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(4, 400, 50, dn.get)
|
||||
|
||||
// Initialize batches
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
|
||||
seq.seq[3] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
// First sequence - all should be returned
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(got))
|
||||
|
||||
// Advance by 50 slots - last batch should expire
|
||||
dn.setBlockBegin(250)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Mark first batch importable and update
|
||||
seq.seq[0].state = batchImportable
|
||||
seq.update(seq.seq[0])
|
||||
|
||||
// Last batch should now be expired
|
||||
require.Equal(t, batchEndSequence, seq.seq[3].state)
|
||||
|
||||
// Advance again
|
||||
dn.setBlockBegin(300)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
seq.seq[1].state = batchImportable
|
||||
seq.update(seq.seq[1])
|
||||
|
||||
// Count expired batches
|
||||
expiredCount := 0
|
||||
for _, b := range seq.seq {
|
||||
if b.state == batchEndSequence {
|
||||
expiredCount++
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, expiredCount >= 2, "expected at least 2 expired batches")
|
||||
}
|
||||
|
||||
func TestProgressiveAdvancement_SlotAdvancesInBursts(t *testing.T) {
|
||||
// Large jump in slots causes multiple batches to expire at once
|
||||
dn := newDynamicNeeds(100, 600)
|
||||
seq := newBatchSequencer(6, 500, 50, dn.get)
|
||||
|
||||
// Initialize batches: [450,500), [400,450), [350,400), [300,350), [250,300), [200,250)
|
||||
for i := range 6 {
|
||||
seq.seq[i] = batch{
|
||||
begin: primitives.Slot(500 - (i+1)*50),
|
||||
end: primitives.Slot(500 - i*50),
|
||||
state: batchInit,
|
||||
}
|
||||
}
|
||||
|
||||
// Large jump - expire 4 batches at once
|
||||
dn.setBlockBegin(400)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have 2 non-expired batches returned
|
||||
nonExpired := 0
|
||||
for _, b := range got {
|
||||
if b.state == batchSequenced {
|
||||
nonExpired++
|
||||
}
|
||||
}
|
||||
require.Equal(t, 2, nonExpired)
|
||||
}
|
||||
|
||||
func TestProgressiveAdvancement_WorkerProcessingDuringAdvancement(t *testing.T) {
|
||||
// Batches in various processing states while needs advance
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(4, 400, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchSyncBlobs}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchSyncColumns}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchSequenced}
|
||||
seq.seq[3] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
// Advance past last batch
|
||||
dn.setBlockBegin(250)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Call sequence - only batchInit should transition
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// batchInit batch should have expired
|
||||
require.Equal(t, batchEndSequence, seq.seq[3].state)
|
||||
|
||||
// Batches in other states should not be returned by sequence (already dispatched)
|
||||
for _, b := range got {
|
||||
require.NotEqual(t, batchSyncBlobs, b.state)
|
||||
require.NotEqual(t, batchSyncColumns, b.state)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProgressiveAdvancement_CompleteBeforeExpiration(t *testing.T) {
|
||||
// Batch completes just before it would expire
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(2, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchSequenced}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchImportable}
|
||||
|
||||
// Complete the second batch BEFORE advancing needs
|
||||
completed := batch{begin: 200, end: 250, state: batchImportComplete, seq: 1}
|
||||
seq.update(completed)
|
||||
|
||||
// Now advance needs past where the batch was
|
||||
dn.setBlockBegin(250)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// The completed batch should have been removed successfully
|
||||
// Sequence should work normally
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, len(got) >= 1, "expected at least 1 batch")
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Category 4: Batch State Transitions Under Expiration
|
||||
// ============================================================================
|
||||
|
||||
func TestStateExpiration_NilBatchNotExpired(t *testing.T) {
|
||||
// batchNil should be initialized, not expired
|
||||
dn := newDynamicNeeds(200, 500)
|
||||
seq := newBatchSequencer(2, 300, 50, dn.get)
|
||||
|
||||
// Leave seq[0] as batchNil (zero value)
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// batchNil should have been initialized and sequenced
|
||||
foundSequenced := false
|
||||
for _, b := range got {
|
||||
if b.state == batchSequenced {
|
||||
foundSequenced = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundSequenced, "expected at least one sequenced batch")
|
||||
}
|
||||
|
||||
func TestStateExpiration_InitBatchExpires(t *testing.T) {
|
||||
// batchInit batches expire when outside retention
|
||||
dn := newDynamicNeeds(200, 500)
|
||||
seq := newBatchSequencer(1, 250, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(got))
|
||||
require.Equal(t, batchEndSequence, got[0].state)
|
||||
}
|
||||
|
||||
func TestStateExpiration_SequencedBatchNotCheckedBySequence(t *testing.T) {
|
||||
// batchSequenced batches are not returned by sequence() (already dispatched)
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(2, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchSequenced}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
// Move retention past second batch
|
||||
dn.setBlockBegin(250)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Init batch should expire, sequenced batch not returned
|
||||
for _, b := range got {
|
||||
require.NotEqual(t, batchSequenced, b.state)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateExpiration_SyncBlobsBatchNotCheckedBySequence(t *testing.T) {
|
||||
// batchSyncBlobs not returned by sequence
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(1, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchSyncBlobs}
|
||||
|
||||
_, err := seq.sequence()
|
||||
require.ErrorIs(t, err, errMaxBatches) // No batch to return
|
||||
}
|
||||
|
||||
func TestStateExpiration_SyncColumnsBatchNotCheckedBySequence(t *testing.T) {
|
||||
// batchSyncColumns not returned by sequence
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(1, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchSyncColumns}
|
||||
|
||||
_, err := seq.sequence()
|
||||
require.ErrorIs(t, err, errMaxBatches)
|
||||
}
|
||||
|
||||
func TestStateExpiration_ImportableBatchNotCheckedBySequence(t *testing.T) {
|
||||
// batchImportable not returned by sequence
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(1, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchImportable}
|
||||
|
||||
_, err := seq.sequence()
|
||||
require.ErrorIs(t, err, errMaxBatches)
|
||||
}
|
||||
|
||||
func TestStateExpiration_RetryableBatchExpires(t *testing.T) {
|
||||
// batchErrRetryable batches can expire
|
||||
dn := newDynamicNeeds(200, 500)
|
||||
seq := newBatchSequencer(1, 250, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 150, end: 200, state: batchErrRetryable}
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(got))
|
||||
require.Equal(t, batchEndSequence, got[0].state)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Category 5: Edge Cases and Boundaries
|
||||
// ============================================================================
|
||||
|
||||
func TestEdgeCase_NeedsSpanShrinks(t *testing.T) {
|
||||
// Unusual case: retention window becomes smaller
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 400, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
|
||||
|
||||
// Shrink window from both ends
|
||||
dn.blockBegin = 300
|
||||
dn.blockEnd = 400
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
_, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Third batch should have expired
|
||||
require.Equal(t, batchEndSequence, seq.seq[2].state)
|
||||
}
|
||||
|
||||
func TestEdgeCase_EmptySequenceAfterExpiration(t *testing.T) {
|
||||
// All batches in non-schedulable states, none can be sequenced
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(2, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchImportable}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchImportable}
|
||||
|
||||
// No batchInit or batchErrRetryable to sequence
|
||||
_, err := seq.sequence()
|
||||
require.ErrorIs(t, err, errMaxBatches)
|
||||
}
|
||||
|
||||
func TestEdgeCase_EndSequenceChainReaction(t *testing.T) {
|
||||
// When batches expire, subsequent calls should handle them correctly
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
// Expire all
|
||||
dn.setBlockBegin(300)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
got1, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(got1))
|
||||
require.Equal(t, batchEndSequence, got1[0].state)
|
||||
|
||||
// Calling sequence again should still return batchEndSequence
|
||||
got2, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(got2))
|
||||
require.Equal(t, batchEndSequence, got2[0].state)
|
||||
}
|
||||
|
||||
func TestEdgeCase_MixedExpirationAndCompletion(t *testing.T) {
|
||||
// Some batches complete while others expire simultaneously
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(4, 400, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchImportComplete}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchImportable}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
|
||||
seq.seq[3] = batch{begin: 200, end: 250, state: batchInit}
|
||||
|
||||
// Expire last two batches
|
||||
dn.setBlockBegin(300)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Update with completed batch to trigger processing
|
||||
completed := batch{begin: 350, end: 400, state: batchImportComplete, seq: 1}
|
||||
seq.update(completed)
|
||||
|
||||
// Verify expired batches are marked
|
||||
expiredCount := 0
|
||||
for _, b := range seq.seq {
|
||||
if b.state == batchEndSequence {
|
||||
expiredCount++
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, expiredCount >= 2, "expected at least 2 expired batches")
|
||||
}
|
||||
|
||||
func TestEdgeCase_BatchExpiresAtSlotZero(t *testing.T) {
|
||||
// Edge case with very low slot numbers
|
||||
dn := newDynamicNeeds(50, 200)
|
||||
seq := newBatchSequencer(2, 100, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 50, end: 100, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 0, end: 50, state: batchInit}
|
||||
|
||||
// Move past first batch
|
||||
dn.setBlockBegin(100)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Both batches should have expired
|
||||
for _, b := range got {
|
||||
require.Equal(t, batchEndSequence, b.state)
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Category 6: Integration with numTodo/remaining
|
||||
// ============================================================================
|
||||
|
||||
func TestNumTodo_AfterExpiration(t *testing.T) {
|
||||
// numTodo should correctly reflect expired batches
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchSequenced}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchSequenced}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
todoBefore := seq.numTodo()
|
||||
|
||||
// Expire last batch
|
||||
dn.setBlockBegin(200)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Force expiration via sequence
|
||||
_, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
todoAfter := seq.numTodo()
|
||||
|
||||
// Todo count should have decreased
|
||||
require.Equal(t, true, todoAfter < todoBefore, "expected todo count to decrease after expiration")
|
||||
}
|
||||
|
||||
func TestRemaining_AfterNeedsChange(t *testing.T) {
|
||||
// batcher.remaining() should use updated needs
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
b := batcher{currentNeeds: dn.get, size: 50}
|
||||
|
||||
remainingBefore := b.remaining(300)
|
||||
|
||||
// Move retention window
|
||||
dn.setBlockBegin(250)
|
||||
b.currentNeeds = dn.get
|
||||
|
||||
remainingAfter := b.remaining(300)
|
||||
|
||||
// Remaining should have decreased
|
||||
require.Equal(t, true, remainingAfter < remainingBefore, "expected remaining to decrease after needs change")
|
||||
}
|
||||
|
||||
func TestCountWithState_AfterExpiration(t *testing.T) {
|
||||
// State counts should be accurate after expiration
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
require.Equal(t, 3, seq.countWithState(batchInit))
|
||||
require.Equal(t, 0, seq.countWithState(batchEndSequence))
|
||||
|
||||
// Expire all batches
|
||||
dn.setBlockBegin(300)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
_, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 0, seq.countWithState(batchInit))
|
||||
require.Equal(t, 3, seq.countWithState(batchEndSequence))
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Category 7: Fork Transition Scenarios (Blob/Column specific)
|
||||
// ============================================================================
|
||||
|
||||
func TestForkTransition_BlobNeedsChange(t *testing.T) {
|
||||
// Test when blob retention is different from block retention
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
// Set blob begin to be further ahead
|
||||
dn.blobBegin = 200
|
||||
|
||||
seq := newBatchSequencer(3, 300, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 250, end: 300, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 200, end: 250, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 150, end: 200, state: batchInit}
|
||||
|
||||
// Sequence should work based on block needs
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(got))
|
||||
}
|
||||
|
||||
func TestForkTransition_ColumnNeedsChange(t *testing.T) {
|
||||
// Test when column retention is different from block retention
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
// Set column begin to be further ahead
|
||||
dn.colBegin = 300
|
||||
|
||||
seq := newBatchSequencer(3, 400, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
|
||||
|
||||
// Batch expiration is based on block needs, not column needs
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(got))
|
||||
}
|
||||
|
||||
func TestForkTransition_BlockNeedsVsBlobNeeds(t *testing.T) {
|
||||
// Blocks still needed but blobs have shorter retention
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
dn.blobBegin = 300 // Blobs only needed from slot 300
|
||||
dn.blobEnd = 500
|
||||
|
||||
seq := newBatchSequencer(3, 400, 50, dn.get)
|
||||
|
||||
seq.seq[0] = batch{begin: 350, end: 400, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 300, end: 350, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 250, end: 300, state: batchInit}
|
||||
|
||||
// All batches should be returned (block expiration, not blob)
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(got))
|
||||
|
||||
// Now change block needs to match blob needs
|
||||
dn.blockBegin = 300
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
// Re-sequence - last batch should expire
|
||||
seq.seq[0].state = batchInit
|
||||
seq.seq[1].state = batchInit
|
||||
seq.seq[2].state = batchInit
|
||||
|
||||
got2, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have 2 non-expired batches
|
||||
nonExpired := 0
|
||||
for _, b := range got2 {
|
||||
if b.state == batchSequenced {
|
||||
nonExpired++
|
||||
}
|
||||
}
|
||||
require.Equal(t, 2, nonExpired)
|
||||
}
|
||||
|
||||
func TestForkTransition_AllResourceTypesAdvance(t *testing.T) {
|
||||
// Block, blob, and column spans all advance together
|
||||
dn := newDynamicNeeds(100, 500)
|
||||
|
||||
seq := newBatchSequencer(4, 400, 50, dn.get)
|
||||
|
||||
// Batches: [350,400), [300,350), [250,300), [200,250)
|
||||
for i := range 4 {
|
||||
seq.seq[i] = batch{
|
||||
begin: primitives.Slot(400 - (i+1)*50),
|
||||
end: primitives.Slot(400 - i*50),
|
||||
state: batchInit,
|
||||
}
|
||||
}
|
||||
|
||||
// Advance all needs together by 200 slots
|
||||
// blockBegin moves from 100 to 300
|
||||
dn.advance(200)
|
||||
seq.batcher.currentNeeds = dn.get
|
||||
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count non-expired
|
||||
nonExpired := 0
|
||||
for _, b := range got {
|
||||
if b.state == batchSequenced {
|
||||
nonExpired++
|
||||
}
|
||||
}
|
||||
|
||||
// With begin=300, batches [200,250) and [250,300) should have expired
|
||||
// Batches [350,400) and [300,350) remain valid
|
||||
require.Equal(t, 2, nonExpired)
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
@@ -17,7 +18,7 @@ func TestBatcherBefore(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "size 10",
|
||||
b: batcher{min: 0, size: 10},
|
||||
b: batcher{currentNeeds: mockCurrentNeedsFunc(0, 100), size: 10},
|
||||
upTo: []primitives.Slot{33, 30, 10, 6},
|
||||
expect: []batch{
|
||||
{begin: 23, end: 33, state: batchInit},
|
||||
@@ -28,7 +29,7 @@ func TestBatcherBefore(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "size 4",
|
||||
b: batcher{min: 0, size: 4},
|
||||
b: batcher{currentNeeds: mockCurrentNeedsFunc(0, 100), size: 4},
|
||||
upTo: []primitives.Slot{33, 6, 4},
|
||||
expect: []batch{
|
||||
{begin: 29, end: 33, state: batchInit},
|
||||
@@ -38,7 +39,7 @@ func TestBatcherBefore(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "trigger end",
|
||||
b: batcher{min: 20, size: 10},
|
||||
b: batcher{currentNeeds: mockCurrentNeedsFunc(20, 100), size: 10},
|
||||
upTo: []primitives.Slot{33, 30, 25, 21, 20, 19},
|
||||
expect: []batch{
|
||||
{begin: 23, end: 33, state: batchInit},
|
||||
@@ -71,7 +72,7 @@ func TestBatchSingleItem(t *testing.T) {
|
||||
min = 0
|
||||
max = 11235
|
||||
size = 64
|
||||
seq := newBatchSequencer(seqLen, min, max, size)
|
||||
seq := newBatchSequencer(seqLen, max, size, mockCurrentNeedsFunc(min, max+1))
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(got))
|
||||
@@ -99,7 +100,7 @@ func TestBatchSequencer(t *testing.T) {
|
||||
min = 0
|
||||
max = 11235
|
||||
size = 64
|
||||
seq := newBatchSequencer(seqLen, min, max, size)
|
||||
seq := newBatchSequencer(seqLen, max, size, mockCurrentNeedsFunc(min, max+1))
|
||||
expected := []batch{
|
||||
{begin: 11171, end: 11235},
|
||||
{begin: 11107, end: 11171},
|
||||
@@ -212,7 +213,10 @@ func TestBatchSequencer(t *testing.T) {
|
||||
// set the min for the batcher close to the lowest slot. This will force the next batch to be partial and the batch
|
||||
// after that to be the final batch.
|
||||
newMin := seq.seq[len(seq.seq)-1].begin - 30
|
||||
seq.batcher.min = newMin
|
||||
seq.currentNeeds = func() das.CurrentNeeds {
|
||||
return das.CurrentNeeds{Block: das.NeedSpan{Begin: newMin, End: seq.batcher.max}}
|
||||
}
|
||||
seq.batcher.currentNeeds = seq.currentNeeds
|
||||
first = seq.seq[0]
|
||||
first.state = batchImportComplete
|
||||
// update() with a complete state will cause the sequence to be extended with an additional batch
|
||||
@@ -235,3 +239,863 @@ func TestBatchSequencer(t *testing.T) {
|
||||
//require.ErrorIs(t, err, errEndSequence)
|
||||
require.Equal(t, batchEndSequence, end.state)
|
||||
}
|
||||
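The updated calls above pass a mockCurrentNeedsFunc helper that is not shown in this hunk. A minimal stand-in consistent with how it is invoked (and with the inline currentNeeds closure in TestBatchSequencer) might look like the sketch below; the Sketch name and the choice to reuse one span for blocks, blobs, and columns are assumptions, not the actual helper.

// mockCurrentNeedsFuncSketch returns a fixed das.CurrentNeeds provider covering
// the half-open slot span [begin, end) for every resource type.
func mockCurrentNeedsFuncSketch(begin, end primitives.Slot) func() das.CurrentNeeds {
	return func() das.CurrentNeeds {
		return das.CurrentNeeds{
			Block: das.NeedSpan{Begin: begin, End: end},
			Blob:  das.NeedSpan{Begin: begin, End: end},
			Col:   das.NeedSpan{Begin: begin, End: end},
		}
	}
}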
|
||||
// initializeBatchWithSlots sets the begin and end slot values for each batch,
// in descending slot order (slot ranges decrease as the index increases).
func initializeBatchWithSlots(batches []batch, min primitives.Slot, size primitives.Slot) {
	for i := range batches {
		// Batches are ordered descending by slot: batch[0] covers the highest
		// slots and batch[len(batches)-1] covers the lowest.
		end := min + primitives.Slot((len(batches)-i)*int(size))
		begin := end - size
		batches[i].begin = begin
		batches[i].end = end
	}
}
|
||||
|
||||
// TestSequence tests the sequence() method with various batch states
|
||||
func TestSequence(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
seqLen int
|
||||
min primitives.Slot
|
||||
max primitives.Slot
|
||||
size primitives.Slot
|
||||
initialStates []batchState
|
||||
expectedCount int
|
||||
expectedErr error
|
||||
stateTransform func([]batch) // optional: transform states before test
|
||||
}{
|
||||
{
|
||||
name: "EmptySequence",
|
||||
seqLen: 0,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{},
|
||||
expectedCount: 0,
|
||||
expectedErr: errMaxBatches,
|
||||
},
|
||||
{
|
||||
name: "SingleBatchInit",
|
||||
seqLen: 1,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{batchInit},
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "SingleBatchErrRetryable",
|
||||
seqLen: 1,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{batchErrRetryable},
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "MultipleBatchesInit",
|
||||
seqLen: 3,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 200,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit},
|
||||
expectedCount: 3,
|
||||
},
|
||||
{
|
||||
name: "MixedStates_InitAndSequenced",
|
||||
seqLen: 2,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 100,
|
||||
initialStates: []batchState{batchInit, batchSequenced},
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "MixedStates_SequencedFirst",
|
||||
seqLen: 2,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 100,
|
||||
initialStates: []batchState{batchSequenced, batchInit},
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "AllBatchesSequenced",
|
||||
seqLen: 3,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 200,
|
||||
initialStates: []batchState{batchSequenced, batchSequenced, batchSequenced},
|
||||
expectedCount: 0,
|
||||
expectedErr: errMaxBatches,
|
||||
},
|
||||
{
|
||||
name: "EndSequenceOnly",
|
||||
seqLen: 1,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{batchEndSequence},
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "EndSequenceWithOthers",
|
||||
seqLen: 2,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{batchInit, batchEndSequence},
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "ImportableNotSequenced",
|
||||
seqLen: 1,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{batchImportable},
|
||||
expectedCount: 0,
|
||||
expectedErr: errMaxBatches,
|
||||
},
|
||||
{
|
||||
name: "ImportCompleteNotSequenced",
|
||||
seqLen: 1,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
initialStates: []batchState{batchImportComplete},
|
||||
expectedCount: 0,
|
||||
expectedErr: errMaxBatches,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
seq := newBatchSequencer(tc.seqLen, tc.max, tc.size, mockCurrentNeedsFunc(tc.min, tc.max+1))
|
||||
|
||||
// Initialize batches with valid slot ranges
|
||||
initializeBatchWithSlots(seq.seq, tc.min, tc.size)
|
||||
|
||||
// Set initial states
|
||||
for i, state := range tc.initialStates {
|
||||
seq.seq[i].state = state
|
||||
}
|
||||
|
||||
// Apply any transformations
|
||||
if tc.stateTransform != nil {
|
||||
tc.stateTransform(seq.seq)
|
||||
}
|
||||
|
||||
got, err := seq.sequence()
|
||||
|
||||
if tc.expectedErr != nil {
|
||||
require.ErrorIs(t, err, tc.expectedErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, tc.expectedCount, len(got))
|
||||
|
||||
// Verify returned batches are in batchSequenced state
|
||||
for _, b := range got {
|
||||
if b.state != batchEndSequence {
|
||||
require.Equal(t, batchSequenced, b.state)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdate tests the update() method which: (1) updates batch state, (2) removes batchImportComplete batches,
|
||||
// (3) shifts remaining batches down, and (4) adds new batches to fill vacated positions.
|
||||
// NOTE: The sequence length can change! Completed batches are removed and new ones are added.
|
||||
func TestUpdate(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
seqLen int
|
||||
batches []batchState
|
||||
updateIdx int
|
||||
newState batchState
|
||||
expectedLen int // expected length after update
|
||||
expected []batchState // expected states of first N batches after update
|
||||
}{
|
||||
{
|
||||
name: "SingleBatchUpdate",
|
||||
seqLen: 1,
|
||||
batches: []batchState{batchInit},
|
||||
updateIdx: 0,
|
||||
newState: batchImportable,
|
||||
expectedLen: 1,
|
||||
expected: []batchState{batchImportable},
|
||||
},
|
||||
{
|
||||
name: "RemoveFirstCompleted_ShiftOthers",
|
||||
seqLen: 3,
|
||||
batches: []batchState{batchImportComplete, batchInit, batchInit},
|
||||
updateIdx: 0,
|
||||
newState: batchImportComplete,
|
||||
expectedLen: 3, // 1 removed + 2 new added
|
||||
expected: []batchState{batchInit, batchInit}, // shifted down
|
||||
},
|
||||
{
|
||||
name: "RemoveMultipleCompleted",
|
||||
seqLen: 3,
|
||||
batches: []batchState{batchImportComplete, batchImportComplete, batchInit},
|
||||
updateIdx: 0,
|
||||
newState: batchImportComplete,
|
||||
expectedLen: 3, // 2 removed + 2 new added
|
||||
expected: []batchState{batchInit}, // only 1 non-complete batch
|
||||
},
|
||||
{
|
||||
name: "RemoveMiddleCompleted_AlsoShifts",
|
||||
seqLen: 3,
|
||||
batches: []batchState{batchInit, batchImportComplete, batchInit},
|
||||
updateIdx: 1,
|
||||
newState: batchImportComplete,
|
||||
expectedLen: 3, // 1 removed + 1 new added
|
||||
expected: []batchState{batchInit, batchInit}, // middle complete removed, last shifted to middle
|
||||
},
|
||||
{
|
||||
name: "SingleBatchComplete_Replaced",
|
||||
seqLen: 1,
|
||||
batches: []batchState{batchInit},
|
||||
updateIdx: 0,
|
||||
newState: batchImportComplete,
|
||||
expectedLen: 1, // special case: replaced with new batch
|
||||
expected: []batchState{batchInit}, // new batch from beforeBatch
|
||||
},
|
||||
{
|
||||
name: "UpdateNonMatchingBatch",
|
||||
seqLen: 2,
|
||||
batches: []batchState{batchInit, batchInit},
|
||||
updateIdx: 0,
|
||||
newState: batchImportable,
|
||||
expectedLen: 2,
|
||||
expected: []batchState{batchImportable, batchInit},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
seq := newBatchSequencer(tc.seqLen, 1000, 64, mockCurrentNeedsFunc(0, 1000+1))
|
||||
|
||||
// Initialize batches with proper slot ranges
|
||||
for i := range seq.seq {
|
||||
seq.seq[i] = batch{
|
||||
begin: primitives.Slot(1000 - (i+1)*64),
|
||||
end: primitives.Slot(1000 - i*64),
|
||||
state: tc.batches[i],
|
||||
}
|
||||
}
|
||||
|
||||
// Create batch to update (must match begin/end to be replaced)
|
||||
updateBatch := seq.seq[tc.updateIdx]
|
||||
updateBatch.state = tc.newState
|
||||
seq.update(updateBatch)
|
||||
|
||||
// Verify expected length
|
||||
if len(seq.seq) != tc.expectedLen {
|
||||
t.Fatalf("expected length %d, got %d", tc.expectedLen, len(seq.seq))
|
||||
}
|
||||
|
||||
// Verify expected states of first N batches
|
||||
for i, expectedState := range tc.expected {
|
||||
if i >= len(seq.seq) {
|
||||
t.Fatalf("expected state at index %d but seq only has %d batches", i, len(seq.seq))
|
||||
}
|
||||
if seq.seq[i].state != expectedState {
|
||||
t.Fatalf("batch[%d]: expected state %s, got %s", i, expectedState.String(), seq.seq[i].state.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Verify slot contiguity for non-newly-generated batches
|
||||
// (newly generated batches from beforeBatch() may not be contiguous with shifted batches)
|
||||
// For this test, we just verify they're in valid slot ranges
|
||||
for i := 0; i < len(seq.seq); i++ {
|
||||
if seq.seq[i].begin >= seq.seq[i].end {
|
||||
t.Fatalf("invalid batch[%d]: begin=%d should be < end=%d", i, seq.seq[i].begin, seq.seq[i].end)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestImportable tests the importable() method for contiguity checking
|
||||
func TestImportable(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
seqLen int
|
||||
states []batchState
|
||||
expectedCount int
|
||||
expectedBreak int // index where importable chain breaks (-1 if none)
|
||||
}{
|
||||
{
|
||||
name: "EmptySequence",
|
||||
seqLen: 0,
|
||||
states: []batchState{},
|
||||
expectedCount: 0,
|
||||
expectedBreak: -1,
|
||||
},
|
||||
{
|
||||
name: "FirstBatchNotImportable",
|
||||
seqLen: 2,
|
||||
states: []batchState{batchInit, batchImportable},
|
||||
expectedCount: 0,
|
||||
expectedBreak: 0,
|
||||
},
|
||||
{
|
||||
name: "FirstBatchImportable",
|
||||
seqLen: 1,
|
||||
states: []batchState{batchImportable},
|
||||
expectedCount: 1,
|
||||
expectedBreak: -1,
|
||||
},
|
||||
{
|
||||
name: "TwoImportableConsecutive",
|
||||
seqLen: 2,
|
||||
states: []batchState{batchImportable, batchImportable},
|
||||
expectedCount: 2,
|
||||
expectedBreak: -1,
|
||||
},
|
||||
{
|
||||
name: "ThreeImportableConsecutive",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchImportable, batchImportable, batchImportable},
|
||||
expectedCount: 3,
|
||||
expectedBreak: -1,
|
||||
},
|
||||
{
|
||||
name: "ImportsBreak_SecondNotImportable",
|
||||
seqLen: 2,
|
||||
states: []batchState{batchImportable, batchInit},
|
||||
expectedCount: 1,
|
||||
expectedBreak: 1,
|
||||
},
|
||||
{
|
||||
name: "ImportsBreak_MiddleNotImportable",
|
||||
seqLen: 4,
|
||||
states: []batchState{batchImportable, batchImportable, batchInit, batchImportable},
|
||||
expectedCount: 2,
|
||||
expectedBreak: 2,
|
||||
},
|
||||
{
|
||||
name: "EndSequenceAfterImportable",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchImportable, batchImportable, batchEndSequence},
|
||||
expectedCount: 2,
|
||||
expectedBreak: 2,
|
||||
},
|
||||
{
|
||||
name: "AllStatesNotImportable",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchInit, batchSequenced, batchErrRetryable},
|
||||
expectedCount: 0,
|
||||
expectedBreak: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
seq := newBatchSequencer(tc.seqLen, 1000, 64, mockCurrentNeedsFunc(0, 1000+1))
|
||||
|
||||
for i, state := range tc.states {
|
||||
seq.seq[i] = batch{
|
||||
begin: primitives.Slot(1000 - (i+1)*64),
|
||||
end: primitives.Slot(1000 - i*64),
|
||||
state: state,
|
||||
}
|
||||
}
|
||||
|
||||
imp := seq.importable()
|
||||
require.Equal(t, tc.expectedCount, len(imp))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestMoveMinimumWithNonImportableUpdate tests integration of moveMinimum with update()
|
||||
func TestMoveMinimumWithNonImportableUpdate(t *testing.T) {
|
||||
t.Run("UpdateBatchAfterMinimumChange", func(t *testing.T) {
|
||||
seq := newBatchSequencer(3, 300, 50, mockCurrentNeedsFunc(100, 300+1))
|
||||
|
||||
// Initialize with batches
|
||||
seq.seq[0] = batch{begin: 200, end: 250, state: batchInit}
|
||||
seq.seq[1] = batch{begin: 150, end: 200, state: batchInit}
|
||||
seq.seq[2] = batch{begin: 100, end: 150, state: batchInit}
|
||||
|
||||
seq.currentNeeds = mockCurrentNeedsFunc(150, 300+1)
|
||||
seq.batcher.currentNeeds = seq.currentNeeds
|
||||
|
||||
// Update non-importable batch above new minimum
|
||||
batchToUpdate := batch{begin: 200, end: 250, state: batchSequenced}
|
||||
seq.update(batchToUpdate)
|
||||
|
||||
// Verify batch was updated
|
||||
require.Equal(t, batchSequenced, seq.seq[0].state)
|
||||
|
||||
// Verify numTodo reflects updated minimum
|
||||
todo := seq.numTodo()
|
||||
require.NotEqual(t, 0, todo, "numTodo should be greater than 0 after moveMinimum and update")
|
||||
})
|
||||
}
|
||||
|
||||
// TestCountWithState tests state counting
|
||||
func TestCountWithState(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
seqLen int
|
||||
states []batchState
|
||||
queryState batchState
|
||||
expectedCount int
|
||||
}{
|
||||
{
|
||||
name: "CountInit_NoBatches",
|
||||
seqLen: 0,
|
||||
states: []batchState{},
|
||||
queryState: batchInit,
|
||||
expectedCount: 0,
|
||||
},
|
||||
{
|
||||
name: "CountInit_OneBatch",
|
||||
seqLen: 1,
|
||||
states: []batchState{batchInit},
|
||||
queryState: batchInit,
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "CountInit_MultipleBatches",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchInit, batchInit, batchInit},
|
||||
queryState: batchInit,
|
||||
expectedCount: 3,
|
||||
},
|
||||
{
|
||||
name: "CountInit_MixedStates",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchInit, batchSequenced, batchInit},
|
||||
queryState: batchInit,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "CountSequenced",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchInit, batchSequenced, batchImportable},
|
||||
queryState: batchSequenced,
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "CountImportable",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchImportable, batchImportable, batchInit},
|
||||
queryState: batchImportable,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "CountComplete",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchImportComplete, batchImportComplete, batchInit},
|
||||
queryState: batchImportComplete,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "CountEndSequence",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchInit, batchEndSequence, batchInit},
|
||||
queryState: batchEndSequence,
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "CountZero_NonexistentState",
|
||||
seqLen: 2,
|
||||
states: []batchState{batchInit, batchInit},
|
||||
queryState: batchImportable,
|
||||
expectedCount: 0,
|
||||
},
|
||||
{
|
||||
name: "CountNil",
|
||||
seqLen: 3,
|
||||
states: []batchState{batchNil, batchNil, batchInit},
|
||||
queryState: batchNil,
|
||||
expectedCount: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
seq := newBatchSequencer(tc.seqLen, 1000, 64, mockCurrentNeedsFunc(0, 1000+1))
|
||||
|
||||
for i, state := range tc.states {
|
||||
seq.seq[i].state = state
|
||||
}
|
||||
|
||||
count := seq.countWithState(tc.queryState)
|
||||
require.Equal(t, tc.expectedCount, count)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNumTodo tests remaining batch count calculation
|
||||
func TestNumTodo(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
seqLen int
|
||||
min primitives.Slot
|
||||
max primitives.Slot
|
||||
size primitives.Slot
|
||||
states []batchState
|
||||
expectedTodo int
|
||||
}{
|
||||
{
|
||||
name: "EmptySequence",
|
||||
seqLen: 0,
|
||||
min: 0,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
states: []batchState{},
|
||||
expectedTodo: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleBatchComplete",
|
||||
seqLen: 1,
|
||||
min: 0,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
states: []batchState{batchImportComplete},
|
||||
expectedTodo: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleBatchInit",
|
||||
seqLen: 1,
|
||||
min: 0,
|
||||
max: 100,
|
||||
size: 10,
|
||||
states: []batchState{batchInit},
|
||||
expectedTodo: 1,
|
||||
},
|
||||
{
|
||||
name: "AllBatchesIgnored",
|
||||
seqLen: 3,
|
||||
min: 0,
|
||||
max: 1000,
|
||||
size: 64,
|
||||
states: []batchState{batchImportComplete, batchImportComplete, batchNil},
|
||||
expectedTodo: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
seq := newBatchSequencer(tc.seqLen, tc.max, tc.size, mockCurrentNeedsFunc(tc.min, tc.max+1))
|
||||
|
||||
for i, state := range tc.states {
|
||||
seq.seq[i] = batch{
|
||||
begin: primitives.Slot(tc.max - primitives.Slot((i+1)*10)),
|
||||
end: primitives.Slot(tc.max - primitives.Slot(i*10)),
|
||||
state: state,
|
||||
}
|
||||
}
|
||||
|
||||
// Just verify numTodo doesn't panic
|
||||
_ = seq.numTodo()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestBatcherRemaining tests the remaining() calculation logic
|
||||
func TestBatcherRemaining(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
min primitives.Slot
|
||||
upTo primitives.Slot
|
||||
size primitives.Slot
|
||||
expected int
|
||||
}{
|
||||
{
|
||||
name: "UpToLessThanMin",
|
||||
min: 100,
|
||||
upTo: 50,
|
||||
size: 10,
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
name: "UpToEqualsMin",
|
||||
min: 100,
|
||||
upTo: 100,
|
||||
size: 10,
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
name: "ExactBoundary",
|
||||
min: 100,
|
||||
upTo: 110,
|
||||
size: 10,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "ExactBoundary_Multiple",
|
||||
min: 100,
|
||||
upTo: 150,
|
||||
size: 10,
|
||||
expected: 5,
|
||||
},
|
||||
{
|
||||
name: "PartialBatch",
|
||||
min: 100,
|
||||
upTo: 115,
|
||||
size: 10,
|
||||
expected: 2,
|
||||
},
|
||||
{
|
||||
name: "PartialBatch_Small",
|
||||
min: 100,
|
||||
upTo: 105,
|
||||
size: 10,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "LargeRange",
|
||||
min: 100,
|
||||
upTo: 500,
|
||||
size: 10,
|
||||
expected: 40,
|
||||
},
|
||||
{
|
||||
name: "LargeRange_Partial",
|
||||
min: 100,
|
||||
upTo: 505,
|
||||
size: 10,
|
||||
expected: 41,
|
||||
},
|
||||
{
|
||||
name: "PartialBatch_Size1",
|
||||
min: 100,
|
||||
upTo: 101,
|
||||
size: 1,
|
||||
expected: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
needs := func() das.CurrentNeeds {
|
||||
return das.CurrentNeeds{Block: das.NeedSpan{Begin: tc.min, End: tc.upTo + 1}}
|
||||
}
|
||||
b := batcher{size: tc.size, currentNeeds: needs}
|
||||
result := b.remaining(tc.upTo)
|
||||
require.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
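The expected values in this table are consistent with remaining() performing a ceiling division of the still-needed span by the batch size, clamped to zero when upTo does not extend past the start of the needed block span. The arithmetic can be sketched as follows (an assumption derived only from the table above, not the code under test):

// remainingSketch counts how many size-slot batches are needed to cover
// (begin, upTo], rounding the final partial batch up.
func remainingSketch(begin, upTo, size primitives.Slot) int {
	if upTo <= begin {
		return 0
	}
	return int((upTo - begin + size - 1) / size)
}

// For example, begin=100, upTo=115, size=10 gives (15+9)/10 = 2, matching the
// PartialBatch case; upTo=505 gives (405+9)/10 = 41, matching LargeRange_Partial.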
|
||||
|
||||
// assertAllBatchesAboveMinimum verifies all returned batches have end > minimum
|
||||
func assertAllBatchesAboveMinimum(t *testing.T, batches []batch, min primitives.Slot) {
|
||||
for _, b := range batches {
|
||||
if b.state != batchEndSequence {
|
||||
if b.end <= min {
|
||||
t.Fatalf("batch begin=%d end=%d has end <= minimum %d", b.begin, b.end, min)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// assertBatchesContiguous verifies contiguity of returned batches
|
||||
func assertBatchesContiguous(t *testing.T, batches []batch) {
|
||||
for i := 0; i < len(batches)-1; i++ {
|
||||
require.Equal(t, batches[i].begin, batches[i+1].end,
|
||||
"batch[%d] begin=%d not contiguous with batch[%d] end=%d", i, batches[i].begin, i+1, batches[i+1].end)
|
||||
}
|
||||
}
|
||||
|
||||
// assertBatchNotReturned verifies a specific batch is not in the returned list
|
||||
func assertBatchNotReturned(t *testing.T, batches []batch, shouldNotBe batch) {
|
||||
for _, b := range batches {
|
||||
if b.begin == shouldNotBe.begin && b.end == shouldNotBe.end {
|
||||
t.Fatalf("batch begin=%d end=%d should not be returned", shouldNotBe.begin, shouldNotBe.end)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMoveMinimumFiltersOutOfRangeBatches tests that batches below new minimum are not returned by sequence()
|
||||
// after moveMinimum is called. The sequence() method marks expired batches (end <= min) as batchEndSequence
|
||||
// but does not return them (unless they're the only batches left).
|
||||
func TestMoveMinimumFiltersOutOfRangeBatches(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
seqLen int
|
||||
min primitives.Slot
|
||||
max primitives.Slot
|
||||
size primitives.Slot
|
||||
initialStates []batchState
|
||||
newMinimum primitives.Slot
|
||||
expectedReturned int
|
||||
expectedAllAbove primitives.Slot // all returned batches should have end > this value (except batchEndSequence)
|
||||
}{
|
||||
// Category 1: Single Batch Below New Minimum
|
||||
{
|
||||
name: "BatchBelowMinimum_Init",
|
||||
seqLen: 4,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit, batchInit},
|
||||
newMinimum: 175,
|
||||
expectedReturned: 3, // [250-300], [200-250], [150-200] are returned
|
||||
expectedAllAbove: 175,
|
||||
},
|
||||
{
|
||||
name: "BatchBelowMinimum_ErrRetryable",
|
||||
seqLen: 4,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchSequenced, batchSequenced, batchErrRetryable, batchErrRetryable},
|
||||
newMinimum: 175,
|
||||
expectedReturned: 1, // only [150-200] (ErrRetryable) is returned; [100-150] is expired and not returned
|
||||
expectedAllAbove: 175,
|
||||
},
|
||||
|
||||
// Category 2: Multiple Batches Below New Minimum
|
||||
{
|
||||
name: "MultipleBatchesBelowMinimum",
|
||||
seqLen: 8,
|
||||
min: 100,
|
||||
max: 500,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit, batchInit, batchInit, batchInit, batchInit, batchInit},
|
||||
newMinimum: 320,
|
||||
expectedReturned: 4, // [450-500], [400-450], [350-400], [300-350] returned; rest expired/not returned
|
||||
expectedAllAbove: 320,
|
||||
},
|
||||
|
||||
// Category 3: Batches at Boundary - batch.end == minimum is expired
|
||||
{
|
||||
name: "BatchExactlyAtMinimum",
|
||||
seqLen: 3,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit},
|
||||
newMinimum: 200,
|
||||
expectedReturned: 1, // [250-300] returned; [200-250] (end==200) and [100-150] are expired
|
||||
expectedAllAbove: 200,
|
||||
},
|
||||
{
|
||||
name: "BatchJustAboveMinimum",
|
||||
seqLen: 3,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit},
|
||||
newMinimum: 199,
|
||||
expectedReturned: 2, // [250-300], [200-250] returned; [100-150] (end<=199) is expired
|
||||
expectedAllAbove: 199,
|
||||
},
|
||||
|
||||
// Category 4: No Batches Affected
|
||||
{
|
||||
name: "MoveMinimumNoAffect",
|
||||
seqLen: 3,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit},
|
||||
newMinimum: 120,
|
||||
expectedReturned: 3, // all batches returned, none below minimum
|
||||
expectedAllAbove: 120,
|
||||
},
|
||||
|
||||
// Category 5: Mixed States Below Minimum
|
||||
{
|
||||
name: "MixedStatesBelowMinimum",
|
||||
seqLen: 4,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchSequenced, batchInit, batchErrRetryable, batchInit},
|
||||
newMinimum: 175,
|
||||
expectedReturned: 2, // [200-250] (Init) and [150-200] (ErrRetryable) returned; others not in Init/ErrRetryable or expired
|
||||
expectedAllAbove: 175,
|
||||
},
|
||||
|
||||
// Category 6: Large moveMinimum
|
||||
{
|
||||
name: "LargeMoveMinimumSkipsMost",
|
||||
seqLen: 4,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit, batchInit},
|
||||
newMinimum: 290,
|
||||
expectedReturned: 1, // only [250-300] (end=300 > 290) returned
|
||||
expectedAllAbove: 290,
|
||||
},
|
||||
|
||||
// Category 7: All Batches Expired
|
||||
{
|
||||
name: "AllBatchesExpired",
|
||||
seqLen: 3,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit},
|
||||
newMinimum: 300,
|
||||
expectedReturned: 1, // when all expire, one batchEndSequence is returned
|
||||
expectedAllAbove: 0, // batchEndSequence may have any slot value, don't check
|
||||
},
|
||||
|
||||
// Category 8: Contiguity after filtering
|
||||
{
|
||||
name: "ContiguityMaintained",
|
||||
seqLen: 4,
|
||||
min: 100,
|
||||
max: 1000,
|
||||
size: 50,
|
||||
initialStates: []batchState{batchInit, batchInit, batchInit, batchInit},
|
||||
newMinimum: 150,
|
||||
expectedReturned: 3, // [250-300], [200-250], [150-200] returned
|
||||
expectedAllAbove: 150,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
seq := newBatchSequencer(tc.seqLen, tc.max, tc.size, mockCurrentNeedsFunc(tc.min, tc.max+1))
|
||||
|
||||
// Initialize batches with valid slot ranges
|
||||
initializeBatchWithSlots(seq.seq, tc.min, tc.size)
|
||||
|
||||
// Set initial states
|
||||
for i, state := range tc.initialStates {
|
||||
seq.seq[i].state = state
|
||||
}
|
||||
|
||||
// move minimum and call sequence to update set of batches
|
||||
seq.currentNeeds = mockCurrentNeedsFunc(tc.newMinimum, tc.max+1)
|
||||
seq.batcher.currentNeeds = seq.currentNeeds
|
||||
got, err := seq.sequence()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify count
|
||||
if len(got) != tc.expectedReturned {
|
||||
t.Fatalf("expected %d batches returned, got %d", tc.expectedReturned, len(got))
|
||||
}
|
||||
|
||||
// Verify all returned non-endSequence batches have end > newMinimum
|
||||
// (batchEndSequence may be returned when all batches are expired, so exclude from check)
|
||||
if tc.expectedAllAbove > 0 {
|
||||
for _, b := range got {
|
||||
if b.state != batchEndSequence && b.end <= tc.expectedAllAbove {
|
||||
t.Fatalf("batch begin=%d end=%d has end <= %d (should be filtered)",
|
||||
b.begin, b.end, tc.expectedAllAbove)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify contiguity is maintained for returned batches
|
||||
if len(got) > 1 {
|
||||
assertBatchesContiguous(t, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -30,35 +31,47 @@ type blobSummary struct {
|
||||
}
|
||||
|
||||
type blobSyncConfig struct {
|
||||
retentionStart primitives.Slot
|
||||
nbv verification.NewBlobVerifier
|
||||
store *filesystem.BlobStorage
|
||||
nbv verification.NewBlobVerifier
|
||||
store *filesystem.BlobStorage
|
||||
currentNeeds func() das.CurrentNeeds
|
||||
}
|
||||
|
||||
func newBlobSync(current primitives.Slot, vbs verifiedROBlocks, cfg *blobSyncConfig) (*blobSync, error) {
|
||||
expected, err := vbs.blobIdents(cfg.retentionStart)
|
||||
expected, err := vbs.blobIdents(cfg.currentNeeds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bbv := newBlobBatchVerifier(cfg.nbv)
|
||||
as := das.NewLazilyPersistentStore(cfg.store, bbv)
|
||||
shouldRetain := func(slot primitives.Slot) bool {
|
||||
needs := cfg.currentNeeds()
|
||||
return needs.Blob.At(slot)
|
||||
}
|
||||
as := das.NewLazilyPersistentStore(cfg.store, bbv, shouldRetain)
|
||||
|
||||
return &blobSync{current: current, expected: expected, bbv: bbv, store: as}, nil
|
||||
}
|
||||
|
||||
type blobVerifierMap map[[32]byte][]verification.BlobVerifier
|
||||
|
||||
type blobSync struct {
|
||||
store das.AvailabilityStore
|
||||
store *das.LazilyPersistentStoreBlob
|
||||
expected []blobSummary
|
||||
next int
|
||||
bbv *blobBatchVerifier
|
||||
current primitives.Slot
|
||||
peer peer.ID
|
||||
}
|
||||
|
||||
func (bs *blobSync) blobsNeeded() int {
|
||||
func (bs *blobSync) needed() int {
|
||||
return len(bs.expected) - bs.next
|
||||
}
|
||||
|
||||
// validateNext is given to the RPC request code as one of the validation callbacks.
// It orchestrates setting up the batch verifier (blobBatchVerifier) and calls Persist on the
// AvailabilityStore. This enables the rest of the code in between RPC and the AvailabilityStore
// to stay decoupled from each other. The AvailabilityStore holds the blobs in memory between the
// call to Persist, and the call to IsDataAvailable (where the blobs are actually written to disk
// if successfully verified).
|
||||
func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
|
||||
if bs.next >= len(bs.expected) {
|
||||
return errUnexpectedResponseSize
|
||||
@@ -102,6 +115,7 @@ func newBlobBatchVerifier(nbv verification.NewBlobVerifier) *blobBatchVerifier {
|
||||
return &blobBatchVerifier{newBlobVerifier: nbv, verifiers: make(blobVerifierMap)}
|
||||
}
|
||||
|
||||
// blobBatchVerifier implements the BlobBatchVerifier interface required by the das store.
|
||||
type blobBatchVerifier struct {
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
verifiers blobVerifierMap
|
||||
@@ -117,6 +131,7 @@ func (bbv *blobBatchVerifier) newVerifier(rb blocks.ROBlob) verification.BlobVer
|
||||
return m[rb.Index]
|
||||
}
|
||||
|
||||
// VerifiedROBlobs satisfies the BlobBatchVerifier interface expected by the AvailabilityChecker
|
||||
func (bbv *blobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, _ []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
|
||||
m, ok := bbv.verifiers[blk.Root()]
|
||||
if !ok {
|
||||