mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 13:28:01 -05:00

Compare commits: hdiff_star ... 22aa87019d (6 commits)

| SHA1 |
|---|
| 22aa87019d |
| 0db74365e0 |
| 6f90101364 |
| 49e1763ec2 |
| c2527c82cd |
| d4ea8fafd6 |
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}

func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx

st, _ := util.DeterministicGenesisStateBellatrix(t, 1)

service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}
@@ -12,6 +12,7 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -53,58 +54,53 @@ type fcuConfig struct {
}

// sendFCU handles the logic to notify the engine of a forkchoice update
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
log.WithError(err).Error("Could not notify forkchoice update")
}

if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}

// shouldOverrideFCU checks whether the incoming block is still subject to being
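The behavioral core of this hunk is that forkchoiceUpdateWithExecution now owns the forkchoice lock and logs failures instead of returning them, so callers such as sendFCU may fire it from a goroutine without holding the lock. Below is a minimal, self-contained sketch of that calling convention, using placeholder types rather than Prysm's real ones:

```go
package main

import (
	"context"
	"log"
	"sync"
)

// engineNotifier stands in for the execution-engine FCU call (hypothetical).
type engineNotifier func(ctx context.Context) error

// service mirrors the shape of the change: the wrapper owns the lock and
// turns errors into logs instead of returning them to the caller.
type service struct {
	forkchoiceMu sync.Mutex
	notify       engineNotifier
}

// forkchoiceUpdate must be called WITHOUT holding forkchoiceMu: it takes the
// lock itself, calls the engine, and never propagates the error upward.
func (s *service) forkchoiceUpdate(ctx context.Context) {
	s.forkchoiceMu.Lock()
	defer s.forkchoiceMu.Unlock()
	if err := s.notify(ctx); err != nil {
		log.Printf("could not notify forkchoice update: %v", err)
	}
}

func main() {
	s := &service{notify: func(context.Context) error { return nil }}
	// sendFCU can now run this in a goroutine during regular sync; here we
	// simply call it synchronously.
	s.forkchoiceUpdate(context.Background())
}
```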
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)

payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)

@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)
}

func TestShouldOverrideFCU(t *testing.T) {
@@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -105,14 +102,17 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
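The new post-Fulu branch pushes cache maintenance into a goroutine bounded by the slot deadline and derived from the long-lived service context, so it is not cancelled when the block's own processing context ends. A small sketch of that pattern, with placeholder names and a made-up deadline value:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// slotDeadline stands in for Prysm's per-slot budget; the real value differs.
const slotDeadline = 4 * time.Second

// refreshCaches is a placeholder for updateCachesPostBlockProcessing.
func refreshCaches(ctx context.Context) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend work
		fmt.Println("caches refreshed")
	case <-ctx.Done():
		fmt.Println("cache refresh aborted:", ctx.Err())
	}
}

func main() {
	// serviceCtx models s.ctx: it outlives any single block's processing,
	// so the background refresh is not tied to the block's context.
	serviceCtx, stop := context.WithCancel(context.Background())
	defer stop()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Bound the background work by the slot deadline, as the hunk does.
		ctx, cancel := context.WithTimeout(serviceCtx, slotDeadline)
		defer cancel()
		refreshCaches(ctx)
	}()
	<-done
}
```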
@@ -322,6 +322,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}

// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {

@@ -351,7 +352,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -364,7 +367,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -904,8 +907,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return

@@ -918,14 +919,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
}
// return early if we already started building a block for the current
// head root
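In the Fulu branch above, the goroutine is launched from inside a defer, which guarantees it only starts once lateBlockTasks has finished its remaining work, including the FCU call. A runnable sketch of that ordering trick (print statements are illustrative only):

```go
package main

import (
	"fmt"
	"sync"
)

// runLate mimics the ordering used by the post-Fulu branch: the goroutine is
// created by a deferred function, so it starts only after the rest of the
// function body (the FCU send) has already run.
func runLate(wg *sync.WaitGroup) {
	defer func() {
		go func() {
			defer wg.Done()
			fmt.Println("2: background cache update starts after the function returns")
		}()
	}()

	fmt.Println("1: send FCU with attributes to the engine")
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	runLate(&wg)
	wg.Wait()
}
```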
@@ -955,6 +974,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
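Before this change, getFCUArgs attached payload attributes only for blocks that arrived after the attestation voting window; early blocks deferred attribute computation to the second FCU call. A simplified sketch of that gate, with a hypothetical window constant standing in for slots.WithinVotingWindow and the real config:

```go
package main

import (
	"fmt"
	"time"
)

// votingWindow is a hypothetical stand-in for the attestation deadline
// within a slot; Prysm derives the real value from the beacon config.
const votingWindow = 4 * time.Second

// withinVotingWindow is a simplified stand-in for slots.WithinVotingWindow:
// it reports whether we are still inside the attestation window of the slot
// that started at slotStart.
func withinVotingWindow(now, slotStart time.Time) bool {
	return now.Sub(slotStart) < votingWindow
}

func main() {
	slotStart := time.Now().Add(-6 * time.Second) // block arrived late in its slot
	if withinVotingWindow(time.Now(), slotStart) {
		fmt.Println("early block: send FCU without attributes, compute them later")
	} else {
		fmt.Println("late block: compute payload attributes before sending FCU")
	}
}
```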
@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
log.WithError(err).Error("Could not update next slot state cache")
return
}
if !slots.IsEpochEnd(slot) {
return nil
return
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}
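updateCachesPostBlockProcessing only runs the epoch-boundary work when the post-state sits on the last slot of its epoch. A tiny sketch of that check, assuming IsEpochEnd means "last slot of the epoch" and using the mainnet value of 32 slots per epoch:

```go
package main

import "fmt"

// slotsPerEpoch is 32 on Ethereum mainnet; Prysm reads it from the config.
const slotsPerEpoch = 32

// isEpochEnd reports whether the given slot is the last slot of its epoch,
// i.e. whether the next slot starts a new epoch.
func isEpochEnd(slot uint64) bool {
	return (slot+1)%slotsPerEpoch == 0
}

func main() {
	for _, s := range []uint64{30, 31, 32, 63} {
		fmt.Printf("slot %d: epoch end = %v\n", s, isEpochEnd(s))
	}
}
```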
@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
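computePayloadAttributes leaves fcuArgs.attributes empty when no tracked validator proposes the next slot, and both sendFCUWithAttributes and handleSecondFCUCall use IsEmpty as the gate for skipping the engine call. A stand-alone sketch of that gate with a simplified attributes type (not Prysm's payload-attribute interface):

```go
package main

import "fmt"

// payloadAttributes is a simplified stand-in for Prysm's payload-attribute
// interface; IsEmpty reports that no tracked validator proposes next slot.
type payloadAttributes struct {
	suggestedFeeRecipient [20]byte
	empty                 bool
}

func (p payloadAttributes) IsEmpty() bool { return p.empty }

// maybeNotifyWithAttributes mirrors the gate used by sendFCUWithAttributes:
// when attribute computation yields nothing, no FCU message is sent at all.
func maybeNotifyWithAttributes(attr payloadAttributes) {
	if attr.IsEmpty() {
		fmt.Println("no tracked proposer for the next slot: skip the FCU call")
		return
	}
	fmt.Printf("send FCU with attributes, fee recipient %x\n", attr.suggestedFeeRecipient)
}

func main() {
	maybeNotifyWithAttributes(payloadAttributes{empty: true})
	maybeNotifyWithAttributes(payloadAttributes{suggestedFeeRecipient: [20]byte{0x01, 0x23}})
}
```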
@@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -816,7 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}

@@ -848,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1321,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1333,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1345,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1357,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1382,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
// INVALID from FCU, with LVH block 17. No head is viable. We check
|
||||
// that the node is optimistic and that we can actually import a block on top of
|
||||
// 17 and recover.
|
||||
func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SlotsPerEpoch = 6
|
||||
config.AltairForkEpoch = 1
|
||||
config.BellatrixForkEpoch = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
|
||||
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), jc.Epoch)
|
||||
|
||||
// import a block that justifies the second epoch
|
||||
driftGenesisTime(service, 18, 0)
|
||||
validHeadState, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
firstInvalidRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
|
||||
sjc := validHeadState.CurrentJustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 19, 0)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
|
||||
// store's headroot is the previous head (since the invalid block did
|
||||
// not finish importing) one and that the node is optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
headRoot, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// import another block based on the last valid head state
|
||||
mockEngine = &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 20, 0)
|
||||
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, jc.Epoch, sjc.Epoch)
|
||||
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
@@ -1624,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1644,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
@@ -1666,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1690,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1700,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isBlock18OptimisticAfterImport)
|
||||
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
|
||||
@@ -1750,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1817,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1838,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -1859,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1888,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1957,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -1982,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -2010,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -2054,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
t.Log(i)
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2071,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -2091,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -2112,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -2143,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2264,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2330,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2613,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2714,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2748,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -3244,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())

@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)
@@ -19,6 +19,7 @@ go_library(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
@@ -6,6 +6,7 @@ import (
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"

@@ -16,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
)
@@ -75,7 +77,12 @@ func ProcessAttestationNoVerifySignature(
return nil, err
}

return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
beaconState, err = gloas.UpdatePendingPaymentWeight(beaconState, att, indices, participatedFlags)
if err != nil {
return nil, err
}

return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance, att)
}

// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
@@ -105,7 +112,9 @@ func SetParticipationAndRewardProposer(
beaconState state.BeaconState,
targetEpoch primitives.Epoch,
indices []uint64,
participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
participatedFlags map[uint8]bool,
totalBalance uint64,
att ethpb.Att) (state.BeaconState, error) {
var proposerRewardNumerator uint64
currentEpoch := time.CurrentEpoch(beaconState)
var stateErr error
@@ -299,6 +308,40 @@ func AttestationParticipationFlagIndices(beaconState state.ReadOnlyBeaconState,
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt

// Spec v1.6.1 (pseudocode excerpt):
//
// # [New in Gloas:EIP7732]
// if is_attestation_same_slot(state, data):
//     assert data.index == 0
//     payload_matches = True
// else:
//     slot_index = data.slot % SLOTS_PER_HISTORICAL_ROOT
//     payload_index = state.execution_payload_availability[slot_index]
//     payload_matches = data.index == payload_index
//
// # [Modified in Gloas:EIP7732]
// is_matching_head = is_matching_target and head_root_matches and payload_matches
var matchingPayload bool
if beaconState.Version() >= version.Gloas {
ok, err := gloas.SameSlotAttestation(beaconState, [32]byte(data.BeaconBlockRoot), data.Slot)
if err != nil {
return nil, err
}
if ok {
if data.CommitteeIndex != 0 {
return nil, fmt.Errorf("committee index %d for same slot attestation must be 0", data.CommitteeIndex)
}
matchingPayload = true
} else {
executionPayloadAvail, err := beaconState.ExecutionPayloadAvailability(data.Slot)
if err != nil {
return nil, err
}
matchingPayload = executionPayloadAvail == uint64(data.CommitteeIndex)
}
matchedSrcTgtHead = matchedSrcTgtHead && matchingPayload
}
if matchedSrcTgtHead && delay == cfg.MinAttestationInclusionDelay {
participatedFlags[headFlagIndex] = true
}
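For the non-same-slot branch of the quoted Gloas pseudocode, payload_matches is just a bit lookup in the execution payload availability vector compared against data.index. A stand-alone sketch of that computation, using the same byte/bit layout as the Gloas test helper later in this diff and the mainnet SLOTS_PER_HISTORICAL_ROOT value:

```go
package main

import "fmt"

// slotsPerHistoricalRoot is 8192 on mainnet; the real value comes from config.
const slotsPerHistoricalRoot = 8192

// payloadMatches follows the quoted pseudocode for the non-same-slot branch:
// look up the availability bit recorded for the attestation's slot and
// compare it against data.index (the committee index field in Prysm).
func payloadMatches(availability []byte, slot, dataIndex uint64) bool {
	idx := slot % slotsPerHistoricalRoot
	bit := (availability[idx/8] >> (idx % 8)) & 1
	return dataIndex == uint64(bit)
}

func main() {
	availability := make([]byte, slotsPerHistoricalRoot/8)
	slot := uint64(3)
	idx := slot % slotsPerHistoricalRoot
	// Mark the payload for this slot as available (bit = 1).
	availability[idx/8] |= 1 << (idx % 8)

	fmt.Println(payloadMatches(availability, slot, 1)) // true: index matches the availability bit
	fmt.Println(payloadMatches(availability, slot, 0)) // false
}
```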
@@ -1,7 +1,9 @@
package altair_test

import (
"bytes"
"fmt"
"reflect"
"testing"

"github.com/OffchainLabs/go-bitfield"
@@ -556,7 +558,7 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {

b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b, &ethpb.Attestation{})
require.NoError(t, err)

i, err := helpers.BeaconProposerIndex(t.Context(), st)
@@ -775,11 +777,67 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
|
||||
headFlagIndex: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gloas same-slot committee index non-zero errors",
|
||||
inputState: func() state.BeaconState {
|
||||
stateSlot := primitives.Slot(5)
|
||||
slot := primitives.Slot(3)
|
||||
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
|
||||
headRoot := bytes.Repeat([]byte{0xBB}, 32)
|
||||
prevRoot := bytes.Repeat([]byte{0xCC}, 32)
|
||||
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, prevRoot, 0, 0)
|
||||
}(),
|
||||
inputData: ðpb.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 1, // invalid for same-slot
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
|
||||
Source: ðpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytes.Repeat([]byte{0xAA}, 32),
|
||||
},
|
||||
},
|
||||
inputDelay: 1,
|
||||
participationIndices: nil,
|
||||
},
|
||||
{
|
||||
name: "gloas payload availability matches committee index",
|
||||
inputState: func() state.BeaconState {
|
||||
stateSlot := primitives.Slot(5)
|
||||
slot := primitives.Slot(3)
|
||||
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
|
||||
headRoot := bytes.Repeat([]byte{0xBB}, 32)
|
||||
// Same prev root to make SameSlotAttestation false and use payload availability.
|
||||
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, headRoot, 1, slot)
|
||||
}(),
|
||||
inputData: ðpb.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
|
||||
Source: ðpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytes.Repeat([]byte{0xAA}, 32),
|
||||
},
|
||||
},
|
||||
inputDelay: 1,
|
||||
participationIndices: map[uint8]bool{
|
||||
sourceFlagIndex: true,
|
||||
targetFlagIndex: true,
|
||||
headFlagIndex: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
|
||||
if test.participationIndices == nil {
|
||||
require.ErrorContains(t, "committee index", err)
|
||||
continue
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, test.participationIndices, flagIndices)
|
||||
if !reflect.DeepEqual(test.participationIndices, flagIndices) {
|
||||
t.Fatalf("unexpected participation indices: got %v want %v", flagIndices, test.participationIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -858,3 +916,61 @@ func TestMatchingStatus(t *testing.T) {
|
||||
require.Equal(t, test.matchedHead, head)
|
||||
}
|
||||
}
|
||||
|
||||
func buildGloasStateForFlags(t *testing.T, stateSlot, slot primitives.Slot, targetRoot, headRoot, prevRoot []byte, availabilityBit uint8, availabilitySlot primitives.Slot) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
blockRoots[0] = targetRoot
|
||||
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = headRoot
|
||||
blockRoots[(slot-1)%cfg.SlotsPerHistoricalRoot] = prevRoot
|
||||
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
|
||||
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
|
||||
idx := availabilitySlot % cfg.SlotsPerHistoricalRoot
|
||||
byteIndex := idx / 8
|
||||
bitIndex := idx % 8
|
||||
if availabilityBit == 1 {
|
||||
execPayloadAvailability[byteIndex] |= 1 << bitIndex
|
||||
}
|
||||
|
||||
checkpointRoot := bytes.Repeat([]byte{0xDD}, fieldparams.RootLength)
|
||||
justified := ðpb.Checkpoint{Root: checkpointRoot}
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: stateSlot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
ExecutionPayloadAvailability: execPayloadAvailability,
|
||||
CurrentJustifiedCheckpoint: justified,
|
||||
PreviousJustifiedCheckpoint: justified,
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
EffectiveBalance: cfg.MinActivationBalance,
|
||||
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{cfg.MinActivationBalance},
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return beaconState
|
||||
}
|
||||
|
||||
@@ -111,7 +111,16 @@ func VerifyAttestationNoVerifySignature(
|
||||
var indexedAtt ethpb.IndexedAtt
|
||||
|
||||
if att.Version() >= version.Electra {
|
||||
if att.GetData().CommitteeIndex != 0 {
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// # [Modified in Gloas:EIP7732]
|
||||
// assert data.index < 2
|
||||
//
|
||||
if beaconState.Version() >= version.Gloas {
|
||||
if att.GetData().CommitteeIndex >= 2 {
|
||||
return fmt.Errorf("incorrect committee index %d", att.GetData().CommitteeIndex)
|
||||
}
|
||||
} else if att.GetData().CommitteeIndex != 0 {
|
||||
return errors.New("committee index must be 0 post-Electra")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
@@ -314,6 +315,75 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestVerifyAttestationNoVerifySignature_GloasCommitteeIndexLimit(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
stateSlot := cfg.MinAttestationInclusionDelay + 1
|
||||
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
|
||||
checkpointRoot := bytes.Repeat([]byte{0xAA}, fieldparams.RootLength)
|
||||
justified := ðpb.Checkpoint{Epoch: 0, Root: checkpointRoot}
|
||||
|
||||
gloasStateProto := ðpb.BeaconStateGloas{
|
||||
Slot: stateSlot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
|
||||
CurrentJustifiedCheckpoint: justified,
|
||||
PreviousJustifiedCheckpoint: justified,
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
EffectiveBalance: cfg.MinActivationBalance,
|
||||
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{cfg.MinActivationBalance},
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
}
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoGloas(gloasStateProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
aggBits := bitfield.NewBitlist(1)
|
||||
aggBits.SetBitAt(0, true)
|
||||
|
||||
att := ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 0,
|
||||
CommitteeIndex: 2, // invalid for Gloas (must be <2)
|
||||
BeaconBlockRoot: blockRoots[0],
|
||||
Source: justified,
|
||||
Target: justified,
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
Signature: bytes.Repeat([]byte{0x00}, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
|
||||
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, "incorrect committee index 2", err)
|
||||
}
|
||||
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
beacon-chain/core/gloas/BUILD.bazel (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["attestation.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["attestation_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/core/gloas/attestation.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// SameSlotAttestation checks if the attestation is for the same slot as the block root in the state.
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// is_attestation_same_slot(state, data):
|
||||
// block_root_at_slot = get_block_root_at_slot(state, data.slot)
|
||||
// block_root_at_prev_slot = get_block_root_at_slot(state, data.slot - 1)
|
||||
// return block_root_at_slot == data.beacon_block_root and block_root_at_prev_slot != data.beacon_block_root
|
||||
func SameSlotAttestation(state state.ReadOnlyBeaconState, blockRoot [32]byte, slot primitives.Slot) (bool, error) {
|
||||
if slot == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
blockRootAtSlot, err := helpers.BlockRootAtSlot(state, slot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
matchingBlockRoot := bytes.Equal(blockRoot[:], blockRootAtSlot)
|
||||
|
||||
blockRootAtPrevSlot, err := helpers.BlockRootAtSlot(state, slot-1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
matchingPrevBlockRoot := bytes.Equal(blockRoot[:], blockRootAtPrevSlot)
|
||||
|
||||
return matchingBlockRoot && !matchingPrevBlockRoot, nil
|
||||
}
|
||||
|
||||
// UpdatePendingPaymentWeight updates the builder pending payment weight based on attestation participation.
|
||||
// Spec v1.6.1 (pseudocode excerpt):
|
||||
//
|
||||
// if data.target.epoch == get_current_epoch(state):
|
||||
// epoch_participation = state.current_epoch_participation
|
||||
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH]
|
||||
// else:
|
||||
// epoch_participation = state.previous_epoch_participation
|
||||
// payment = state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH]
|
||||
//
|
||||
// for index in get_attesting_indices(state, attestation):
|
||||
// will_set_new_flag = False
|
||||
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
|
||||
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
|
||||
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
|
||||
// proposer_reward_numerator += get_base_reward(state, index) * weight
|
||||
// will_set_new_flag = True
|
||||
// if will_set_new_flag and is_attestation_same_slot(state, data) and payment.withdrawal.amount > 0:
|
||||
// payment.weight += state.validators[index].effective_balance
|
||||
//
|
||||
// if current_epoch_target:
|
||||
// state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH] = payment
|
||||
// else:
|
||||
// state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH] = payment
|
||||
func UpdatePendingPaymentWeight(beaconState state.BeaconState, att ethpb.Att, indices []uint64, participatedFlags map[uint8]bool) (state.BeaconState, error) {
|
||||
if beaconState.Version() < version.Gloas {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
data := att.GetData()
|
||||
epoch := slots.ToEpoch(beaconState.Slot())
|
||||
|
||||
isSameSlot, err := SameSlotAttestation(beaconState, [32]byte(data.BeaconBlockRoot), data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !isSameSlot {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
var (
|
||||
paymentSlot primitives.Slot
|
||||
payment *ethpb.BuilderPendingPayment
|
||||
epochParticipation []byte
|
||||
)
|
||||
if data.Target.Epoch == epoch {
|
||||
paymentSlot = slotsPerEpoch + (data.Slot % slotsPerEpoch)
|
||||
payment, err = beaconState.BuilderPendingPayment(uint64(paymentSlot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epochParticipation, err = beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
paymentSlot = data.Slot % slotsPerEpoch
|
||||
payment, err = beaconState.BuilderPendingPayment(uint64(paymentSlot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epochParticipation, err = beaconState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if payment.Withdrawal.Amount == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
flagIndices := []uint8{cfg.TimelySourceFlagIndex, cfg.TimelyTargetFlagIndex, cfg.TimelyHeadFlagIndex}
|
||||
hasNewFlag := func(idx uint64) bool {
|
||||
participation := epochParticipation[idx]
|
||||
for _, f := range flagIndices {
|
||||
if !participatedFlags[f] {
|
||||
continue
|
||||
}
|
||||
if participation&(1<<f) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
for _, idx := range indices {
|
||||
if !hasNewFlag(idx) {
|
||||
continue
|
||||
}
|
||||
validator, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(idx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payment.Weight += primitives.Gwei(validator.EffectiveBalance)
|
||||
}
|
||||
|
||||
if err := beaconState.SetBuilderPendingPayment(uint64(paymentSlot), payment); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return beaconState, nil
|
||||
}
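For orientation, a worked sketch of the payment index arithmetic above, assuming mainnet SLOTS_PER_EPOCH = 32; the helper and the numbers are illustrative only and not part of the diff.

package main

import "fmt"

// paymentIndex mirrors the index selection in UpdatePendingPaymentWeight:
// current-epoch targets use the second half of the builder pending payments
// vector, previous-epoch targets use the first half.
func paymentIndex(attSlot, slotsPerEpoch, targetEpoch, currentEpoch uint64) uint64 {
	if targetEpoch == currentEpoch {
		return slotsPerEpoch + attSlot%slotsPerEpoch
	}
	return attSlot % slotsPerEpoch
}

func main() {
	// An attestation for slot 37 whose target is the current epoch lands at
	// index 32 + 37%32 = 37; with a previous-epoch target it lands at index 5.
	fmt.Println(paymentIndex(37, 32, 1, 1)) // 37
	fmt.Println(paymentIndex(37, 32, 0, 1)) // 5
}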
|
||||
beacon-chain/core/gloas/attestation_test.go (new file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
func buildStateWithBlockRoots(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for slot, root := range roots {
|
||||
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
|
||||
}
|
||||
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = bytes.Repeat([]byte{0x11}, 32)
|
||||
}
|
||||
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = bytes.Repeat([]byte{0x22}, 32)
|
||||
}
|
||||
|
||||
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: stateSlot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x33}, 32),
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x44}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x44}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
ExecutionPayloadAvailability: execPayloadAvailability,
|
||||
Validators: []*ethpb.Validator{},
|
||||
Balances: []uint64{},
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
|
||||
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return state.(*state_native.BeaconState)
|
||||
}
|
||||
|
||||
func TestSameSlotAttestation(t *testing.T) {
|
||||
rootA := bytes.Repeat([]byte{0xAA}, 32)
|
||||
rootB := bytes.Repeat([]byte{0xBB}, 32)
|
||||
rootC := bytes.Repeat([]byte{0xCC}, 32)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
stateSlot primitives.Slot
|
||||
slot primitives.Slot
|
||||
blockRoot []byte
|
||||
roots map[primitives.Slot][]byte
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "slot zero always true",
|
||||
stateSlot: 1,
|
||||
slot: 0,
|
||||
blockRoot: rootA,
|
||||
roots: map[primitives.Slot][]byte{},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "matching current different previous",
|
||||
stateSlot: 6,
|
||||
slot: 4,
|
||||
blockRoot: rootA,
|
||||
roots: map[primitives.Slot][]byte{
|
||||
4: rootA,
|
||||
3: rootB,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "matching current same previous",
|
||||
stateSlot: 6,
|
||||
slot: 4,
|
||||
blockRoot: rootA,
|
||||
roots: map[primitives.Slot][]byte{
|
||||
4: rootA,
|
||||
3: rootA,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "non matching current",
|
||||
stateSlot: 6,
|
||||
slot: 4,
|
||||
blockRoot: rootC,
|
||||
roots: map[primitives.Slot][]byte{
|
||||
4: rootA,
|
||||
3: rootB,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
state := buildStateWithBlockRoots(t, tt.stateSlot, tt.roots)
|
||||
var rootArr [32]byte
|
||||
copy(rootArr[:], tt.blockRoot)
|
||||
|
||||
got, err := SameSlotAttestation(state, rootArr, tt.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func buildStateForPaymentTest(t *testing.T, stateSlot primitives.Slot, paymentIdx int, amount primitives.Gwei, weight primitives.Gwei, roots map[primitives.Slot][]byte) *state_native.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for slot, root := range roots {
|
||||
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
|
||||
}
|
||||
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = bytes.Repeat([]byte{0x44}, 32)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = bytes.Repeat([]byte{0x55}, 32)
|
||||
}
|
||||
|
||||
validator := ðpb.Validator{
|
||||
PublicKey: bytes.Repeat([]byte{0x01}, 48),
|
||||
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x02}, 31)...),
|
||||
EffectiveBalance: cfg.MinActivationBalance,
|
||||
}
|
||||
|
||||
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
|
||||
for i := range payments {
|
||||
payments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
payments[paymentIdx] = ðpb.BuilderPendingPayment{
|
||||
Weight: weight,
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
Amount: amount,
|
||||
},
|
||||
}
|
||||
|
||||
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: stateSlot,
|
||||
GenesisValidatorsRoot: bytes.Repeat([]byte{0x33}, 32),
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
ExecutionPayloadAvailability: execPayloadAvailability,
|
||||
Validators: []*ethpb.Validator{validator},
|
||||
Balances: []uint64{cfg.MinActivationBalance},
|
||||
CurrentEpochParticipation: []byte{0},
|
||||
PreviousEpochParticipation: []byte{0},
|
||||
BuilderPendingPayments: payments,
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x66}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x66}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return state.(*state_native.BeaconState)
|
||||
}
|
||||
|
||||
func TestUpdatePendingPaymentWeight(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
slotsPerEpoch := cfg.SlotsPerEpoch
|
||||
slot := primitives.Slot(4)
|
||||
stateSlot := slot + 1
|
||||
currentEpoch := slots.ToEpoch(stateSlot)
|
||||
|
||||
rootA := bytes.Repeat([]byte{0xAA}, 32)
|
||||
rootB := bytes.Repeat([]byte{0xBB}, 32)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
targetEpoch primitives.Epoch
|
||||
blockRoot []byte
|
||||
initialAmount primitives.Gwei
|
||||
initialWeight primitives.Gwei
|
||||
wantWeight primitives.Gwei
|
||||
}{
|
||||
{
|
||||
name: "same slot current epoch adds weight",
|
||||
targetEpoch: currentEpoch,
|
||||
blockRoot: rootA,
|
||||
initialAmount: 10,
|
||||
initialWeight: 0,
|
||||
wantWeight: primitives.Gwei(cfg.MinActivationBalance),
|
||||
},
|
||||
{
|
||||
name: "same slot zero amount no weight change",
|
||||
targetEpoch: currentEpoch,
|
||||
blockRoot: rootA,
|
||||
initialAmount: 0,
|
||||
initialWeight: 5,
|
||||
wantWeight: 5,
|
||||
},
|
||||
{
|
||||
name: "non matching block root no change",
|
||||
targetEpoch: currentEpoch,
|
||||
blockRoot: rootB,
|
||||
initialAmount: 10,
|
||||
initialWeight: 7,
|
||||
wantWeight: 7,
|
||||
},
|
||||
{
|
||||
name: "previous epoch target uses earlier slot",
|
||||
targetEpoch: currentEpoch - 1,
|
||||
blockRoot: rootA,
|
||||
initialAmount: 20,
|
||||
initialWeight: 0,
|
||||
wantWeight: primitives.Gwei(cfg.MinActivationBalance),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var paymentIdx int
|
||||
if tt.targetEpoch == currentEpoch {
|
||||
paymentIdx = int(slotsPerEpoch + (slot % slotsPerEpoch))
|
||||
} else {
|
||||
paymentIdx = int(slot % slotsPerEpoch)
|
||||
}
|
||||
state := buildStateForPaymentTest(t, stateSlot, paymentIdx, tt.initialAmount, tt.initialWeight, map[primitives.Slot][]byte{
|
||||
slot: tt.blockRoot,
|
||||
slot - 1: rootB,
|
||||
})
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: slot,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: tt.blockRoot,
|
||||
Source: ðpb.Checkpoint{},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: tt.targetEpoch,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
participatedFlags := map[uint8]bool{
|
||||
cfg.TimelySourceFlagIndex: true,
|
||||
cfg.TimelyTargetFlagIndex: true,
|
||||
cfg.TimelyHeadFlagIndex: true,
|
||||
}
|
||||
indices := []uint64{0}
|
||||
|
||||
gotState, err := UpdatePendingPaymentWeight(state, att, indices, participatedFlags)
|
||||
require.NoError(t, err)
|
||||
|
||||
payment, err := gotState.BuilderPendingPayment(uint64(paymentIdx))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.wantWeight, payment.Weight)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -38,6 +38,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -185,73 +186,162 @@ func (dcs *DataColumnStorage) WarmCache() {
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open the data column filesystem file.
|
||||
f, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := f.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem.
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := listEpochFiles(dcs.fs, epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
epochHighest, err := dcs.processEpochFiles(files)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
|
||||
}
|
||||
|
||||
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
dcs.prune()
|
||||
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
totalElapsed := time.Since(start)
|
||||
|
||||
// Log summary
|
||||
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
|
||||
}
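For readers unfamiliar with the storage layout, the rewritten warm-up assumes a period/epoch/file hierarchy on disk; the concrete names below are hypothetical, since file names are only ever parsed through extractFileMetadata.

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical layout walked by WarmCache:
	//   <base>/<period>/<epoch>/<blockroot>.sszs
	// ReadDir(base) yields period directories, ReadDir(period) yields epoch
	// directories, listEpochFiles collects the *.sszs files of one epoch, and
	// processEpochFiles reads their metadata in parallel.
	fmt.Println(path.Join(".", "4321", "138272", "0x1234abcd.sszs"))
}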
|
||||
|
||||
// listEpochFiles lists all .sszs files in an epoch directory.
|
||||
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read epoch directory")
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
|
||||
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
for _, filePath := range files {
|
||||
eg.Go(func() error {
|
||||
epoch, err := dcs.processFile(filePath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return highestEpoch, err
|
||||
}
|
||||
|
||||
return highestEpoch, nil
|
||||
}
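The fan-out above is a standard errgroup pattern: per-file failures are logged inside the goroutine and swallowed so one bad file cannot abort the warm-up, while a mutex guards the shared maximum. A minimal standalone sketch of that pattern, using generic names rather than the storage types from this diff:

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	epochs := []int{12, 7, 42} // stand-ins for the epochs parsed from each file

	var (
		eg      errgroup.Group
		mu      sync.Mutex
		highest int
	)
	for _, epoch := range epochs {
		// Relies on Go 1.22 per-iteration loop variables, as the code above does.
		eg.Go(func() error {
			// A real worker would log and return nil on per-file errors so the
			// other files still get processed.
			mu.Lock()
			defer mu.Unlock()
			highest = max(highest, epoch)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("unexpected error:", err)
	}
	fmt.Println("highest epoch:", highest) // 42
}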
|
||||
|
||||
// processFile processes a single .sszs file.
|
||||
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "extract file metadata")
|
||||
}
|
||||
|
||||
// Open the file (each goroutine gets its own FD)
|
||||
f, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "open file")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read metadata
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "read metadata")
|
||||
}
|
||||
|
||||
// Extract indices
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return fileMetadata.epoch, nil // No indices, skip
|
||||
}
|
||||
|
||||
// Build ident and set in cache (thread-safe)
|
||||
dataColumnsIdent := DataColumnsIdent{
|
||||
Root: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
Indices: indices,
|
||||
}
|
||||
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return 0, errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return fileMetadata.epoch, nil
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary.
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
"interfaces_gloas.go",
|
||||
"prometheus.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/state",
|
||||
|
||||
@@ -63,6 +63,7 @@ type ReadOnlyBeaconState interface {
|
||||
ReadOnlyDeposits
|
||||
ReadOnlyConsolidations
|
||||
ReadOnlyProposerLookahead
|
||||
readOnlyGloasFields
|
||||
ToProtoUnsafe() any
|
||||
ToProto() any
|
||||
GenesisTime() time.Time
|
||||
@@ -98,6 +99,7 @@ type WriteOnlyBeaconState interface {
|
||||
WriteOnlyWithdrawals
|
||||
WriteOnlyDeposits
|
||||
WriteOnlyProposerLookahead
|
||||
writeOnlyGloasFields
|
||||
SetGenesisTime(val time.Time) error
|
||||
SetGenesisValidatorsRoot(val []byte) error
|
||||
SetSlot(val primitives.Slot) error
|
||||
|
||||
beacon-chain/state/interfaces_gloas.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package state

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

type writeOnlyGloasFields interface {
	SetBuilderPendingPayment(index uint64, payment *ethpb.BuilderPendingPayment) error
}

type readOnlyGloasFields interface {
	BuilderPendingPayment(index uint64) (*ethpb.BuilderPendingPayment, error)
	ExecutionPayloadAvailability(slot primitives.Slot) (uint64, error)
}
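These two interfaces are embedded into ReadOnlyBeaconState and WriteOnlyBeaconState in the hunk above, so every state implementation satisfies them and pre-Gloas versions simply return a not-supported error at runtime. A hedged sketch of a read-only consumer; the function name is illustrative and not part of the diff:

package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// pendingPaymentWeight only needs the read-only Gloas accessor, so any
// state.ReadOnlyBeaconState can be passed in.
func pendingPaymentWeight(st state.ReadOnlyBeaconState, index uint64) (primitives.Gwei, error) {
	payment, err := st.BuilderPendingPayment(index)
	if err != nil {
		return 0, err // pre-Gloas states land here
	}
	return payment.Weight, nil
}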
@@ -14,6 +14,7 @@ go_library(
|
||||
"getters_deposits.go",
|
||||
"getters_eth1.go",
|
||||
"getters_exit.go",
|
||||
"getters_gloas.go",
|
||||
"getters_misc.go",
|
||||
"getters_participation.go",
|
||||
"getters_payload_header.go",
|
||||
@@ -36,6 +37,7 @@ go_library(
|
||||
"setters_deposit_requests.go",
|
||||
"setters_deposits.go",
|
||||
"setters_eth1.go",
|
||||
"setters_gloas.go",
|
||||
"setters_misc.go",
|
||||
"setters_participation.go",
|
||||
"setters_payload_header.go",
|
||||
|
||||
beacon-chain/state/state-native/getters_gloas.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package state_native

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
)

// BuilderPendingPayment returns the builder pending payment for the given index.
func (b *BeaconState) BuilderPendingPayment(index uint64) (*ethpb.BuilderPendingPayment, error) {
	if b.version < version.Gloas {
		return nil, errNotSupported("BuilderPendingPayment", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	return b.builderPendingPayments[index], nil
}

// ExecutionPayloadAvailability returns the execution payload availability bit for the given slot.
func (b *BeaconState) ExecutionPayloadAvailability(slot primitives.Slot) (uint64, error) {
	if b.version < version.Gloas {
		return 0, errNotSupported("ExecutionPayloadAvailability", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	if b.executionPayloadAvailability == nil {
		return 0, nil
	}

	slotIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
	byteIndex := slotIndex / 8
	bitIndex := slotIndex % 8

	bit := (b.executionPayloadAvailability[byteIndex] >> bitIndex) & 1

	return uint64(bit), nil
}
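A quick numeric check of the bit addressing in ExecutionPayloadAvailability, assuming mainnet SlotsPerHistoricalRoot = 8192; the sketch below is illustrative only.

package main

import "fmt"

func main() {
	const slotsPerHistoricalRoot = 8192
	bits := make([]byte, slotsPerHistoricalRoot/8)

	slot := uint64(8203)
	slotIndex := slot % slotsPerHistoricalRoot // 11: the vector wraps around
	byteIndex := slotIndex / 8                 // 1
	bitIndex := slotIndex % 8                  // 3

	bits[byteIndex] |= 1 << bitIndex               // write path, as in the tests
	fmt.Println((bits[byteIndex] >> bitIndex) & 1) // read path, as in the getter: prints 1
}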
beacon-chain/state/state-native/setters_gloas.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package state_native

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
)

// SetBuilderPendingPayment sets the builder pending payment at the given index.
func (b *BeaconState) SetBuilderPendingPayment(index uint64, payment *ethpb.BuilderPendingPayment) error {
	if b.version < version.Gloas {
		return errNotSupported("SetBuilderPendingPayment", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	b.builderPendingPayments[index] = ethpb.CopyBuilderPendingPayment(payment)

	b.markFieldAsDirty(types.BuilderPendingPayments)
	return nil
}
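Taken together with the getter, the intended round trip is read, mutate, write back, which is what UpdatePendingPaymentWeight in beacon-chain/core/gloas does; the helper below is a hypothetical condensed version of that flow, not part of the diff.

package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// addPaymentWeight bumps the weight of one builder pending payment. Writing the
// value back through SetBuilderPendingPayment copies it and marks the field dirty.
func addPaymentWeight(st state.BeaconState, index uint64, delta primitives.Gwei) error {
	payment, err := st.BuilderPendingPayment(index)
	if err != nil {
		return err
	}
	payment.Weight += delta
	return st.SetBuilderPendingPayment(index, payment)
}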
@@ -3,11 +3,12 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -192,19 +193,13 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
|
||||
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
|
||||
|
||||
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)
|
||||
|
||||
select {
|
||||
case s.dataColumnLogCh <- dataColumnLogEntry{
|
||||
Slot: roDataColumn.Slot(),
|
||||
ColIdx: roDataColumn.Index,
|
||||
PropIdx: roDataColumn.ProposerIndex(),
|
||||
BlockRoot: roDataColumn.BlockRoot(),
|
||||
ParentRoot: roDataColumn.ParentRoot(),
|
||||
PeerSuffix: pid.String()[len(pid.String())-6:],
|
||||
PeerGossipScore: peerGossipScore,
|
||||
validationTime: validationTime,
|
||||
sinceStartTime: sinceSlotStartTime,
|
||||
slot: roDataColumn.Slot(),
|
||||
index: roDataColumn.Index,
|
||||
root: roDataColumn.BlockRoot(),
|
||||
validationTime: validationTime,
|
||||
sinceStartTime: sinceSlotStartTime,
|
||||
}:
|
||||
default:
|
||||
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
|
||||
@@ -249,68 +244,69 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
|
||||
}
|
||||
|
||||
type dataColumnLogEntry struct {
|
||||
Slot primitives.Slot
|
||||
ColIdx uint64
|
||||
PropIdx primitives.ValidatorIndex
|
||||
BlockRoot [32]byte
|
||||
ParentRoot [32]byte
|
||||
PeerSuffix string
|
||||
PeerGossipScore float64
|
||||
validationTime time.Duration
|
||||
sinceStartTime time.Duration
|
||||
slot primitives.Slot
|
||||
index uint64
|
||||
root [32]byte
|
||||
validationTime time.Duration
|
||||
sinceStartTime time.Duration
|
||||
}
|
||||
|
||||
func (s *Service) processDataColumnLogs() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
|
||||
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
|
||||
|
||||
for {
|
||||
select {
|
||||
case entry := <-s.dataColumnLogCh:
|
||||
cols := slotStats[entry.Slot]
|
||||
cols[entry.ColIdx] = entry
|
||||
slotStats[entry.Slot] = cols
|
||||
case col := <-s.dataColumnLogCh:
|
||||
cols := slotStats[col.root]
|
||||
cols = append(cols, col)
|
||||
slotStats[col.root] = cols
|
||||
case <-ticker.C:
|
||||
for slot, columns := range slotStats {
|
||||
var (
|
||||
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
|
||||
peers = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
|
||||
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
)
|
||||
for root, columns := range slotStats {
|
||||
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
|
||||
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
|
||||
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)
|
||||
|
||||
totalReceived := 0
|
||||
for _, entry := range columns {
|
||||
if entry.PeerSuffix == "" {
|
||||
for _, column := range columns {
|
||||
indices = append(indices, column.index)
|
||||
|
||||
sumValidationTime += column.validationTime
|
||||
sumSinceStartTime += column.sinceStartTime
|
||||
|
||||
if totalReceived == 0 {
|
||||
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
|
||||
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
|
||||
totalReceived++
|
||||
continue
|
||||
}
|
||||
colIndices = append(colIndices, entry.ColIdx)
|
||||
peers = append(peers, entry.PeerSuffix)
|
||||
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
|
||||
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
|
||||
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))
|
||||
|
||||
minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
|
||||
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
|
||||
totalReceived++
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"receivedCount": totalReceived,
|
||||
"columnIndices": colIndices,
|
||||
"peers": peers,
|
||||
"gossipScores": gossipScores,
|
||||
"validationTimes": validationTimes,
|
||||
"sinceStartTimes": sinceStartTimes,
|
||||
}).Debug("Accepted data column sidecars summary")
|
||||
if totalReceived > 0 {
|
||||
slices.Sort(indices)
|
||||
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
|
||||
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": columns[0].slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"count": totalReceived,
|
||||
"indices": helpers.PrettySlice(indices),
|
||||
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
|
||||
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
|
||||
}).Debug("Accepted data column sidecars summary")
|
||||
}
|
||||
}
|
||||
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
|
||||
|
||||
slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func roundFloat(f float64, decimals int) float64 {
|
||||
mult := math.Pow(10, float64(decimals))
|
||||
return math.Round(f*mult) / mult
|
||||
func prettyMinMaxAverage(min, max, average time.Duration) string {
|
||||
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
|
||||
}
|
||||
|
||||
@@ -687,6 +687,12 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
|
||||
}}
|
||||
}
|
||||
|
||||
func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
|
||||
return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
|
||||
return st, nil
|
||||
}}
|
||||
}
|
||||
|
||||
func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
|
||||
return sbrForValOverrideWithT(nil, idx, val)
|
||||
}
|
||||
|
||||
@@ -11,12 +11,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/logging"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
@@ -484,88 +482,19 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e
|
||||
|
||||
defer dv.recordResult(RequireSidecarProposerExpected, &err)
|
||||
|
||||
type slotParentRoot struct {
|
||||
slot primitives.Slot
|
||||
parentRoot [fieldparams.RootLength]byte
|
||||
}
|
||||
|
||||
targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)
|
||||
|
||||
var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
|
||||
// Use cached values if available.
|
||||
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
|
||||
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Compute the epoch of the data column slot.
|
||||
dataColumnEpoch := slots.ToEpoch(slot)
|
||||
if dataColumnEpoch > 0 {
|
||||
dataColumnEpoch = dataColumnEpoch - 1
|
||||
}
|
||||
|
||||
// Compute the target root for the epoch.
|
||||
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
|
||||
if err != nil {
|
||||
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
|
||||
}
|
||||
|
||||
// Store the target root in the cache.
|
||||
targetRootBySlotParentRoot[slotParentRoot] = targetRoot
|
||||
|
||||
return targetRoot, nil
|
||||
}
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the slot of the data column.
|
||||
dataColumnSlot := dataColumn.Slot()
|
||||
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
// Compute the target root for the data column.
|
||||
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
|
||||
// Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
|
||||
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "target root"))
|
||||
return columnErrBuilder(errors.Wrap(err, "verifying state"))
|
||||
}
|
||||
|
||||
// Compute the epoch of the data column slot.
|
||||
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
|
||||
if dataColumnEpoch > 0 {
|
||||
dataColumnEpoch = dataColumnEpoch - 1
|
||||
}
|
||||
|
||||
// Create a checkpoint for the target root.
|
||||
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}
|
||||
|
||||
// Try to extract the proposer index from the data column in the cache.
|
||||
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)
|
||||
|
||||
if !cached {
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
// Ensure the expensive index computation is only performed once for
|
||||
// concurrent requests for the same signature data.
|
||||
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
|
||||
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
|
||||
if err != nil {
|
||||
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
|
||||
}
|
||||
|
||||
idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
|
||||
if err != nil {
|
||||
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
|
||||
}
|
||||
|
||||
return idx, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
|
||||
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
|
||||
}
|
||||
// Use proposer lookahead directly
|
||||
idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
|
||||
}
|
||||
|
||||
if idx != dataColumn.ProposerIndex() {
|
||||
@@ -626,7 +555,3 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {
|
||||
|
||||
return sha256.Sum256(unhashedKey), nil
|
||||
}
|
||||
|
||||
func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
|
||||
return string(root[:]) + fmt.Sprintf("%d", slot)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package verification
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -795,87 +794,90 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
firstColumn := columns[0]
|
||||
ctx := t.Context()
|
||||
testCases := []struct {
|
||||
name string
|
||||
stateByRooter StateByRooter
|
||||
proposerCache proposerCache
|
||||
columns []blocks.RODataColumn
|
||||
error string
|
||||
}{
|
||||
{
|
||||
name: "Cached, matches",
|
||||
stateByRooter: nil,
|
||||
proposerCache: &mockProposerCache{
|
||||
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
|
||||
},
|
||||
columns: columns,
|
||||
},
|
||||
{
|
||||
name: "Cached, does not match",
|
||||
stateByRooter: nil,
|
||||
proposerCache: &mockProposerCache{
|
||||
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
|
||||
},
|
||||
columns: columns,
|
||||
error: errSidecarUnexpectedProposer.Error(),
|
||||
},
|
||||
{
|
||||
name: "Not cached, state lookup failure",
|
||||
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
|
||||
proposerCache: &mockProposerCache{
|
||||
ProposerCB: pcReturnsNotFound(),
|
||||
},
|
||||
columns: columns,
|
||||
error: "verifying state",
|
||||
},
|
||||
}
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: tc.stateByRooter,
|
||||
pc: tc.proposerCache,
|
||||
hsp: &mockHeadStateProvider{},
|
||||
fc: &mockForkchoicer{
|
||||
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
|
||||
},
|
||||
// Create a Fulu state to get the expected proposer from the lookahead.
|
||||
fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
|
||||
expectedProposer, err := fuluState.ProposerLookahead()
|
||||
require.NoError(t, err)
|
||||
expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])
|
||||
|
||||
// Generate data columns with the expected proposer index.
|
||||
matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
|
||||
// Generate data columns with wrong proposer index.
|
||||
wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)
|
||||
|
||||
t.Run("Proposer matches", func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: sbrReturnsState(fuluState),
|
||||
hsp: &mockHeadStateProvider{
|
||||
headRoot: parentRoot[:],
|
||||
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
|
||||
headStateReadOnly: fuluState,
|
||||
},
|
||||
}
|
||||
fc: &mockForkchoicer{},
|
||||
},
|
||||
}
|
||||
|
||||
verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
|
||||
var wg sync.WaitGroup
|
||||
verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
|
||||
var err1, err2 error
|
||||
wg.Go(func() {
|
||||
err1 = verifier.SidecarProposerExpected(ctx)
|
||||
})
|
||||
wg.Go(func() {
|
||||
err2 = verifier.SidecarProposerExpected(ctx)
|
||||
})
|
||||
wg.Wait()
|
||||
t.Run("Proposer does not match", func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: sbrReturnsState(fuluState),
|
||||
hsp: &mockHeadStateProvider{
|
||||
headRoot: parentRoot[:],
|
||||
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
|
||||
headStateReadOnly: fuluState,
|
||||
},
|
||||
fc: &mockForkchoicer{},
|
||||
},
|
||||
}
|
||||
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
|
||||
if len(tc.error) > 0 {
|
||||
require.ErrorContains(t, tc.error, err1)
|
||||
require.ErrorContains(t, tc.error, err2)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
return
|
||||
}
|
||||
t.Run("State lookup failure", func(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: sbrNotFound(t, columns[0].ParentRoot()),
|
||||
hsp: &mockHeadStateProvider{},
|
||||
fc: &mockForkchoicer{},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, err1)
|
||||
require.NoError(t, err2)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.ErrorContains(t, "verifying state", err)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
}
|
||||
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
|
||||
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
|
||||
blobs := make([]kzg.Blob, 0, len(roBlobs))
|
||||
for i := range roBlobs {
|
||||
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
|
||||
}
|
||||
|
||||
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
return roDataColumnSidecars
|
||||
}
|
||||
|
||||
func TestColumnRequirementSatisfaction(t *testing.T) {
|
||||
@@ -922,12 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestConcatRootSlot(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{1, 2, 3}
|
||||
const slot = primitives.Slot(3210)
|
||||
|
||||
const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"
|
||||
|
||||
actual := concatRootSlot(root, slot)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
changelog/manu-cache-warmup.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Data column sidecars cache warmup: process all sidecars for a given epoch in parallel.
changelog/manu-log.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Summarize the DEBUG log for data column sidecars received via gossip.
changelog/potuz_dcs_pc_removal.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Changed
- Use lookahead to validate data column sidecar proposer index.
changelog/potuz_dont_lock_fcu.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Notify the engine about forkchoice updates in the background.
changelog/potuz_fcu_ctx.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Changed
- Use a separate context when updating the slot cache.
@@ -174,3 +174,28 @@ func copyBeaconBlockBodyGloas(body *BeaconBlockBodyGloas) *BeaconBlockBodyGloas
|
||||
|
||||
return copied
|
||||
}
|
||||
|
||||
// CopyBuilderPendingPayment creates a deep copy of a builder pending payment.
|
||||
func CopyBuilderPendingPayment(original *BuilderPendingPayment) *BuilderPendingPayment {
|
||||
if original == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &BuilderPendingPayment{
|
||||
Weight: original.Weight,
|
||||
Withdrawal: copyBuilderPendingWithdrawal(original.Withdrawal),
|
||||
}
|
||||
}
|
||||
|
||||
// copyBuilderPendingWithdrawal creates a deep copy of a builder pending withdrawal.
|
||||
func copyBuilderPendingWithdrawal(original *BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
|
||||
if original == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(original.FeeRecipient),
|
||||
Amount: original.Amount,
|
||||
BuilderIndex: original.BuilderIndex,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -200,6 +200,7 @@ go_test(
        "fulu__sanity__blocks_test.go",
        "fulu__sanity__slots_test.go",
        "fulu__ssz_static__ssz_static_test.go",
        "gloas__operations__attestation_test.go",
        "gloas__ssz_static__ssz_static_test.go",
        "phase0__epoch_processing__effective_balance_updates_test.go",
        "phase0__epoch_processing__epoch_processing_test.go",
@@ -278,6 +279,7 @@ go_test(
        "//testing/spectest/shared/fulu/rewards:go_default_library",
        "//testing/spectest/shared/fulu/sanity:go_default_library",
        "//testing/spectest/shared/fulu/ssz_static:go_default_library",
        "//testing/spectest/shared/gloas/operations:go_default_library",
        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
        "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
        "//testing/spectest/shared/phase0/finality:go_default_library",
@@ -0,0 +1,11 @@
package mainnet

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMainnet_Gloas_Operations_Attestation(t *testing.T) {
	operations.RunAttestationTest(t, "mainnet")
}
@@ -206,6 +206,7 @@ go_test(
        "fulu__sanity__blocks_test.go",
        "fulu__sanity__slots_test.go",
        "fulu__ssz_static__ssz_static_test.go",
        "gloas__operations__attestation_test.go",
        "gloas__ssz_static__ssz_static_test.go",
        "phase0__epoch_processing__effective_balance_updates_test.go",
        "phase0__epoch_processing__epoch_processing_test.go",
@@ -288,6 +289,7 @@ go_test(
        "//testing/spectest/shared/fulu/rewards:go_default_library",
        "//testing/spectest/shared/fulu/sanity:go_default_library",
        "//testing/spectest/shared/fulu/ssz_static:go_default_library",
        "//testing/spectest/shared/gloas/operations:go_default_library",
        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
        "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
        "//testing/spectest/shared/phase0/finality:go_default_library",
@@ -0,0 +1,11 @@
package minimal

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMinimal_Gloas_Operations_Attestation(t *testing.T) {
	operations.RunAttestationTest(t, "minimal")
}
22 testing/spectest/shared/gloas/operations/BUILD.bazel (Normal file)
@@ -0,0 +1,22 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "attestation.go",
        "helpers.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/spectest/shared/common/operations:go_default_library",
        "//testing/util:go_default_library",
    ],
)
27 testing/spectest/shared/gloas/operations/attestation.go (Normal file)
@@ -0,0 +1,27 @@
package operations

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	common "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/common/operations"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func blockWithAttestation(attestationSSZ []byte) (interfaces.SignedBeaconBlock, error) {
	att := &ethpb.AttestationElectra{}
	if err := att.UnmarshalSSZ(attestationSSZ); err != nil {
		return nil, err
	}
	b := util.NewBeaconBlockGloas()
	b.Block.Body = &ethpb.BeaconBlockBodyGloas{Attestations: []*ethpb.AttestationElectra{att}}
	return blocks.NewSignedBeaconBlock(b)
}

func RunAttestationTest(t *testing.T, config string) {
	common.RunAttestationTest(t, config, version.String(version.Gloas), blockWithAttestation, altair.ProcessAttestationsNoVerifySignature, sszToState)
}
33 testing/spectest/shared/gloas/operations/helpers.go (Normal file)
@@ -0,0 +1,33 @@
package operations

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func sszToState(b []byte) (state.BeaconState, error) {
	base := &ethpb.BeaconStateGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return state_native.InitializeFromProtoGloas(base)
}

func sszToBlock(b []byte) (interfaces.SignedBeaconBlock, error) {
	base := &ethpb.BeaconBlockGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{Block: base})
}

func sszToBlockBody(b []byte) (interfaces.ReadOnlyBeaconBlockBody, error) {
	base := &ethpb.BeaconBlockBodyGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return blocks.NewBeaconBlockBody(base)
}
@@ -15,29 +15,27 @@ import (

// RunSSZStaticTests executes "ssz_static" tests.
func RunSSZStaticTests(t *testing.T, config string) {
	common.RunSSZStaticTests(t, config, "gloas", unmarshalledSSZ, customHtr)
	common.RunSSZStaticTests(t, config, "gloas", UnmarshalledSSZ, customHtr)
}

func customHtr(t *testing.T, htrs []common.HTR, object any) []common.HTR {
func customHtr(t *testing.T, htrs []common.HTR, object interface{}) []common.HTR {
	_, ok := object.(*ethpb.BeaconStateGloas)
	if !ok {
		return htrs
	}

	htrs = append(htrs, func(s any) ([32]byte, error) {
	htrs = append(htrs, func(s interface{}) ([32]byte, error) {
		beaconState, err := state_native.InitializeFromProtoGloas(s.(*ethpb.BeaconStateGloas))
		require.NoError(t, err)

		return beaconState.HashTreeRoot(context.Background())
	})

	return htrs
}

// unmarshalledSSZ unmarshalls serialized input.
func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (any, error) {
	var obj any
// UnmarshalledSSZ unmarshalls serialized input.
func UnmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (interface{}, error) {
	var obj interface{}
	switch folderName {
	// Gloas specific types
	case "ExecutionPayloadBid":
@@ -54,28 +52,22 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
		obj = &ethpb.BeaconBlockGloas{}
	case "BeaconBlockBody":
		obj = &ethpb.BeaconBlockBodyGloas{}
	case "BeaconState":
		obj = &ethpb.BeaconStateGloas{}
	case "BuilderPendingPayment":
		obj = &ethpb.BuilderPendingPayment{}
	case "BuilderPendingWithdrawal":
		obj = &ethpb.BuilderPendingWithdrawal{}
	case "ExecutionPayloadEnvelope":
		obj = &ethpb.ExecutionPayloadEnvelope{}
		obj = &enginev1.ExecutionPayloadEnvelope{}
	case "SignedExecutionPayloadEnvelope":
		obj = &ethpb.SignedExecutionPayloadEnvelope{}
		obj = &enginev1.SignedExecutionPayloadEnvelope{}
	case "ForkChoiceNode":
		t.Skip("Not a consensus type")
	case "IndexedPayloadAttestation":
		t.Skip("Not a consensus type")
	case "DataColumnSidecar":
		obj = &ethpb.DataColumnSidecarGloas{}

	// Standard types that also exist in gloas
	case "ExecutionPayload":
		obj = &enginev1.ExecutionPayloadDeneb{}
	case "ExecutionPayloadHeader":
		obj = &enginev1.ExecutionPayloadHeaderDeneb{}
	case "Attestation":
		obj = &ethpb.AttestationElectra{}
	case "AttestationData":
@@ -86,6 +78,8 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
		obj = &ethpb.AggregateAttestationAndProofElectra{}
	case "BeaconBlockHeader":
		obj = &ethpb.BeaconBlockHeader{}
	case "BeaconState":
		obj = &ethpb.BeaconStateGloas{}
	case "Checkpoint":
		obj = &ethpb.Checkpoint{}
	case "Deposit":
@@ -98,6 +92,7 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
		obj = &ethpb.Eth1Data{}
	case "Eth1Block":
		t.Skip("Unused type")
		return nil, nil
	case "Fork":
		obj = &ethpb.Fork{}
	case "ForkData":
@@ -141,15 +136,15 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
	case "SyncCommittee":
		obj = &ethpb.SyncCommittee{}
	case "LightClientOptimisticUpdate":
		obj = &ethpb.LightClientOptimisticUpdateDeneb{}
		t.Skip("Need to fix header type first")
	case "LightClientFinalityUpdate":
		obj = &ethpb.LightClientFinalityUpdateElectra{}
		t.Skip("Need to fix header type first")
	case "LightClientBootstrap":
		obj = &ethpb.LightClientBootstrapElectra{}
		t.Skip("Need to fix header type first")
	case "LightClientUpdate":
		obj = &ethpb.LightClientUpdateElectra{}
		t.Skip("Need to fix header type first")
	case "LightClientHeader":
		obj = &ethpb.LightClientHeaderDeneb{}
		t.Skip("Need to fix header type first")
	case "BlobIdentifier":
		obj = &ethpb.BlobIdentifier{}
	case "BlobSidecar":
@@ -178,6 +173,8 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
		obj = &enginev1.ConsolidationRequest{}
	case "ExecutionRequests":
		obj = &enginev1.ExecutionRequests{}
	case "DataColumnSidecar":
		t.Skip("TODO: fix inclusion proof but not a priority")
	case "DataColumnsByRootIdentifier":
		obj = &ethpb.DataColumnsByRootIdentifier{}
	case "MatrixEntry":
@@ -185,13 +182,11 @@ func unmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (a
	default:
		return nil, errors.New("type not found")
	}

	var err error
	if o, ok := obj.(fssz.Unmarshaler); ok {
		err = o.UnmarshalSSZ(serializedBytes)
	} else {
		err = errors.New("could not unmarshal object, not a fastssz compatible object")
	}

	return obj, err
}
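The final hunk above decodes whichever concrete object the switch selected through the fastssz Unmarshaler interface; a small standalone sketch of that decode pattern follows, with stand-in types rather than Prysm's.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// Unmarshaler mirrors the single method this pattern needs from fastssz.
type Unmarshaler interface {
	UnmarshalSSZ(buf []byte) error
}

// Checkpoint is a stand-in consensus type with a simplified SSZ layout
// (only the 8-byte little-endian epoch is decoded here).
type Checkpoint struct{ Epoch uint64 }

func (c *Checkpoint) UnmarshalSSZ(buf []byte) error {
	if len(buf) < 8 {
		return errors.New("buffer too short")
	}
	c.Epoch = binary.LittleEndian.Uint64(buf[:8])
	return nil
}

// decode picks a concrete object by folder name and unmarshals into it,
// mirroring the switch plus fssz.Unmarshaler assertion in UnmarshalledSSZ.
func decode(folderName string, serialized []byte) (any, error) {
	var obj any
	switch folderName {
	case "Checkpoint":
		obj = &Checkpoint{}
	default:
		return nil, errors.New("type not found")
	}
	o, ok := obj.(Unmarshaler)
	if !ok {
		return nil, errors.New("not a fastssz compatible object")
	}
	return obj, o.UnmarshalSSZ(serialized)
}

func main() {
	obj, err := decode("Checkpoint", []byte{42, 0, 0, 0, 0, 0, 0, 0})
	fmt.Println(obj, err) // &{42} <nil>
}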
@@ -44,6 +44,13 @@ func WithProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, valRoo
	}
}

// WithProposer sets the proposer index for the generated block without signing.
func WithProposer(idx primitives.ValidatorIndex) DenebBlockGeneratorOption {
	return func(g *denebBlockGenerator) {
		g.proposer = idx
	}
}

func WithPayloadSetter(p *enginev1.ExecutionPayloadDeneb) DenebBlockGeneratorOption {
	return func(g *denebBlockGenerator) {
		g.payload = p
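The hunk above adds WithProposer to a functional-options block generator; the standalone sketch below shows how such options compose, using stand-in types rather than the util package's actual generator.

package main

import "fmt"

// denebBlockGenerator and its options below are stand-ins illustrating the
// functional-options pattern; not Prysm's actual types.
type denebBlockGenerator struct {
	proposer uint64
	slot     uint64
}

type DenebBlockGeneratorOption func(*denebBlockGenerator)

// WithProposer sets the proposer index without touching anything else.
func WithProposer(idx uint64) DenebBlockGeneratorOption {
	return func(g *denebBlockGenerator) { g.proposer = idx }
}

// newGenerator applies each option to a freshly initialized generator.
func newGenerator(slot uint64, opts ...DenebBlockGeneratorOption) *denebBlockGenerator {
	g := &denebBlockGenerator{slot: slot}
	for _, opt := range opts {
		opt(g)
	}
	return g
}

func main() {
	g := newGenerator(10, WithProposer(7))
	fmt.Println(g.slot, g.proposer) // 10 7
}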