Compare commits


1 Commit

Author: satushh
SHA1: 9692993b6a
Message: remove redundant justified checkpoint update in pullTips
Date: 2025-12-19 23:07:40 +05:30
64 changed files with 781 additions and 894 deletions

View File

@@ -193,7 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

View File

@@ -323,17 +323,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
@@ -341,7 +338,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch && !fuluAndNextEpoch {
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {

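The hunk above removes the Fulu-only next-epoch branch from getPayloadAttribute: the proposer lookup now happens only when the requested slot is in the state's own epoch, the state is copied and advanced only for future slots, and a second lookup runs once the target slot crosses into the next epoch. A minimal, self-contained sketch of that control flow, using hypothetical stand-ins (epochOf, lookupProposer, advanceState) rather than the Prysm API:

package main

import "fmt"

// epochOf is a hypothetical stand-in for slots.ToEpoch (32 slots per epoch).
func epochOf(slot uint64) uint64 { return slot / 32 }

// state is a simplified stand-in for a beacon state; only the slot matters here.
type state struct{ slot uint64 }

func (s state) copy() state { return s }

// lookupProposer and advanceState are hypothetical stand-ins for
// s.trackedProposer and transition.ProcessSlotsUsingNextSlotCache.
func lookupProposer(st state, slot uint64) (uint64, bool) { return slot % 4, true }
func advanceState(st state, slot uint64) (state, error)   { st.slot = slot; return st, nil }

// proposerForSlot mirrors the simplified flow after the hunk: same-epoch
// lookup first, copy-and-advance only for future slots, and a second lookup
// on the advanced state when the target slot crosses into the next epoch.
func proposerForSlot(st state, slot uint64) (uint64, bool) {
	var val uint64
	var ok bool
	e, stateEpoch := epochOf(slot), epochOf(st.slot)
	if e == stateEpoch {
		if val, ok = lookupProposer(st, slot); !ok {
			return 0, false
		}
	}
	if slot > st.slot {
		st = st.copy()
		var err error
		if st, err = advanceState(st, slot); err != nil {
			return 0, false
		}
	}
	if e > stateEpoch {
		val, ok = lookupProposer(st, slot)
	}
	return val, ok
}

func main() {
	fmt.Println(proposerForSlot(state{slot: 31}, 33)) // next-epoch slot: state is advanced first
}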
View File

@@ -1053,3 +1053,40 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}
func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

View File

@@ -12,7 +12,6 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -54,53 +53,58 @@ type fcuConfig struct {
}
// sendFCU handles the logic to notify the engine of a forkchoice update
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
if fcuArgs.attributes.IsEmpty() {
return
}
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being

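A rough sketch of the reshaped sendFCU path shown above: the function now returns an error, bails out when the head is unchanged, bails out when a proposer-boost reorg attempt already sent attributes from lateBlockTasks, and otherwise forwards to forkchoiceUpdateWithExecution (which in the hunk also saves the head and prunes the attestation pool; the sketch folds that into a single notifyEngine stand-in). All names below are hypothetical stand-ins, not the Prysm service API:

package main

import "fmt"

// fcuArgs and service are simplified stand-ins; the function values replace
// the real Service methods so the control flow can stand on its own.
type fcuArgs struct {
	headRoot   [32]byte
	attributes []byte // empty slice stands in for "no payload attributes"
}

type service struct {
	currentSlot    uint64
	isNewHead      func(root [32]byte) bool
	shouldOverride func(root [32]byte, slot uint64) bool
	notifyEngine   func(args *fcuArgs) error
}

// sendFCU mirrors the post-refactor ordering: skip when the head is unchanged,
// skip when a proposer-boost reorg attempt already sent attributes from the
// late-block path, otherwise call the engine and propagate any error.
func (s *service) sendFCU(args *fcuArgs) error {
	if !s.isNewHead(args.headRoot) {
		return nil
	}
	if len(args.attributes) > 0 && s.shouldOverride(args.headRoot, s.currentSlot+1) {
		return nil
	}
	if err := s.notifyEngine(args); err != nil {
		return fmt.Errorf("could not notify forkchoice update: %w", err)
	}
	return nil
}

func main() {
	s := &service{
		currentSlot:    10,
		isNewHead:      func([32]byte) bool { return true },
		shouldOverride: func([32]byte, uint64) bool { return false },
		notifyEngine:   func(*fcuArgs) error { return nil },
	}
	fmt.Println(s.sendFCU(&fcuArgs{headRoot: [32]byte{'h'}}))
}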
View File

@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
}
func TestShouldOverrideFCU(t *testing.T) {

View File

@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,13 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}
@@ -45,7 +43,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
// If the head state alone is enough, we can return it directly read only.
if c.Epoch <= headEpoch {
if c.Epoch == headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil

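The hunk above tightens getRecentPreState: checkpoints older than the head epoch are rejected outright, compatibility is decided by comparing dependent roots at c.Epoch-1 instead of headEpoch-1, and the head state is returned read-only only when the checkpoint sits in the head's own epoch. A hedged sketch of that decision order, with dependentRoot standing in for ForkChoiceStore.DependentRootForEpoch and string results in place of the real return values:

package main

import "fmt"

type checkpoint struct {
	epoch uint64
	root  [32]byte
}

// dependentRoot is a hypothetical stand-in for
// ForkChoiceStore.DependentRootForEpoch.
func dependentRoot(root [32]byte, epoch uint64) ([32]byte, error) {
	return [32]byte{'d'}, nil // pretend every root shares one dependent root
}

// recentPreStateChoice mirrors the tightened guards: reject checkpoints older
// than the head epoch, compare dependent roots at c.Epoch-1, and only return
// the head state directly when the checkpoint is in the head's epoch.
func recentPreStateChoice(c checkpoint, headRoot [32]byte, headEpoch uint64) string {
	if c.epoch < headEpoch || c.epoch == 0 {
		return "nil: checkpoint is too old (or genesis)"
	}
	hd, err := dependentRoot(headRoot, c.epoch-1)
	if err != nil {
		return "nil: no dependent root for head"
	}
	td, err := dependentRoot(c.root, c.epoch-1)
	if err != nil {
		return "nil: no dependent root for target"
	}
	if hd != td {
		return "nil: head shuffling is incompatible with the target"
	}
	if c.epoch == headEpoch {
		return "head state, read-only"
	}
	return "advance the head state to the checkpoint epoch"
}

func main() {
	fmt.Println(recentPreStateChoice(checkpoint{epoch: 2, root: [32]byte{'r'}}, [32]byte{'h'}, 2))
}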
View File

@@ -170,13 +170,12 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -198,13 +197,12 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -229,7 +227,6 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -238,9 +235,8 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service.head = &head{
root: [32]byte{'T'},
block: headBlock,
slot: 64,
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -267,7 +263,6 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -275,8 +270,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'U'},
block: headBlock,
root: [32]byte{'T'},
state: s,
slot: 64,
}
@@ -293,13 +287,12 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

View File

@@ -66,6 +66,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}
if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -102,17 +105,14 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
s.sendFCU(cfg, fcuArgs)
// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
return nil
}
@@ -295,6 +295,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
@@ -322,7 +330,6 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}
// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -352,9 +359,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -367,7 +372,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -907,6 +912,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -919,32 +926,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -974,8 +963,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")

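Taken together, the hunks above reshape lateBlockTasks: a single fork choice read lock is taken near the top, the next-slot and epoch-boundary caches are refreshed inline for every fork (the Fulu-only asynchronous branch is removed), and the trailing write lock around the forkchoice update is dropped. A minimal sketch of that ordering, with all helpers passed in as hypothetical function values:

package main

import (
	"fmt"
	"sync"
)

type store struct{ mu sync.RWMutex }

// lateBlockTasksSketch mirrors the post-hunk ordering: early exit when a block
// for the current slot already arrived, a single read lock held for the whole
// routine, a synchronous cache refresh, and finally the forkchoice update.
func lateBlockTasksSketch(
	s *store,
	headIsCurrent, inRegularSync bool,
	updateNextSlotCache, handleEpochBoundary, notifyEngine func() error,
) {
	if headIsCurrent {
		return
	}
	s.mu.RLock()
	defer s.mu.RUnlock()
	if !inRegularSync {
		return
	}
	// Caches are refreshed inline before talking to the engine.
	if err := updateNextSlotCache(); err != nil {
		fmt.Println("could not update next slot state cache:", err)
	}
	if err := handleEpochBoundary(); err != nil {
		fmt.Println("could not update epoch boundary caches:", err)
	}
	if err := notifyEngine(); err != nil {
		fmt.Println("failed to update forkchoice with engine:", err)
	}
}

func main() {
	noop := func() error { return nil }
	lateBlockTasksSketch(&store{}, false, true, noop, noop, noop)
	fmt.Println("late block tasks done")
}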
View File

@@ -42,8 +42,14 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
}
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -167,19 +173,26 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
log.WithError(err).Error("Could not update next slot state cache")
return
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return
return nil
}
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -189,6 +202,20 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}
// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.

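The getFCUArgs/computePayloadAttributes/handleSecondFCUCall changes above split attribute computation in two: blocks arriving inside the attestation voting window skip it in the hot path and rely on a deferred second FCU call, while late blocks compute attributes inline before the first call. A hedged, self-contained sketch of that split, with simplified stand-ins for the service helpers:

package main

import "fmt"

type args struct{ attributes []byte }

// getFCUArgsSketch mirrors the new early returns: no attributes during initial
// sync, and none for blocks that arrive within the attestation voting window.
func getFCUArgsSketch(inRegularSync, withinVotingWindow bool, a *args, computeAttributes func(*args)) {
	if !inRegularSync {
		return
	}
	if withinVotingWindow {
		return // early block: attributes are deferred to the second FCU call
	}
	computeAttributes(a) // late block: compute attributes before the first FCU
}

// handleSecondFCUCallSketch mirrors the deferred call: only fire the
// background FCU when the first call went out without attributes and the
// freshly processed block is still the head.
func handleSecondFCUCallSketch(a *args, blockIsHead bool, sendSecondFCU func()) {
	if len(a.attributes) == 0 && blockIsHead {
		go sendSecondFCU()
	}
}

func main() {
	a := &args{}
	done := make(chan struct{})
	getFCUArgsSketch(true, true, a, func(x *args) { x.attributes = []byte{1} })
	handleSecondFCUCallSketch(a, true, func() {
		fmt.Println("second FCU carries the payload attributes")
		close(done)
	})
	<-done
}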
View File

@@ -738,9 +738,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -790,9 +788,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -820,9 +816,25 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -854,9 +866,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1329,9 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1343,9 +1351,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1357,9 +1363,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1371,9 +1375,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1398,6 +1400,197 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}
// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)
// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported, that the
// store's headroot is the previous head (since the invalid block did
// not finish importing), and that the node is optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}
// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
@@ -1449,9 +1642,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1471,9 +1662,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
@@ -1494,9 +1684,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1519,9 +1708,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1531,10 +1718,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1585,9 +1768,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1654,9 +1835,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1677,9 +1856,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1699,9 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1730,9 +1906,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1800,9 +1975,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1827,9 +2000,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1857,9 +2028,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1903,6 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
@@ -1919,9 +2089,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1941,9 +2109,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1963,9 +2130,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1996,9 +2161,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -2119,9 +2282,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2187,9 +2348,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2472,10 +2631,7 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -2576,9 +2732,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
@@ -2612,10 +2766,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, "context canceled", err)
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -3111,9 +3262,7 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
}
// Execute postBlockProcess
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(cfg)
service.cfg.ForkChoiceStore.Unlock()
// Check error expectation
if tt.expectError {

View File

@@ -156,15 +156,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -117,9 +117,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -179,9 +177,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -290,3 +290,52 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}

View File

@@ -122,6 +122,24 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return nil
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex primitives.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureBatch(

View File

@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
if bal > 0 {
continue
}

View File

@@ -182,6 +182,12 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}
sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -195,7 +201,7 @@ func ProcessBlockNoVerifyAnySig(
// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(rSet).Join(aSet)
set.Join(bSet).Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

View File

@@ -157,8 +157,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
}
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {

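The three signature-related hunks above add a BlockSignatureBatch helper, join its output ahead of the randao and attestation batches in ProcessBlockNoVerifyAnySig, and shift the expected batch descriptions accordingly ("block signature" first). A simplified stand-in sketch of that batching shape; the types below only model the bookkeeping, not the real bls/signing API:

package main

import "fmt"

// signatureBatch is a simplified stand-in for bls.SignatureBatch; only the
// bookkeeping needed to show the join order is modeled.
type signatureBatch struct {
	pubKeys      [][]byte
	signatures   [][]byte
	descriptions []string
	rootFuncs    []func() ([32]byte, error)
}

// join mirrors bls.Set.Join: entries are concatenated so one batched
// verification covers block, randao and attestation signatures.
func (b *signatureBatch) join(other *signatureBatch) *signatureBatch {
	b.pubKeys = append(b.pubKeys, other.pubKeys...)
	b.signatures = append(b.signatures, other.signatures...)
	b.descriptions = append(b.descriptions, other.descriptions...)
	b.rootFuncs = append(b.rootFuncs, other.rootFuncs...)
	return b
}

// blockSignatureBatch is a stand-in for the new helper: a single entry for the
// proposer signature, with the block root computed lazily via rootFunc.
func blockSignatureBatch(proposerPubKey, sig []byte, rootFunc func() ([32]byte, error)) *signatureBatch {
	return &signatureBatch{
		pubKeys:      [][]byte{proposerPubKey},
		signatures:   [][]byte{sig},
		descriptions: []string{"block signature"},
		rootFuncs:    []func() ([32]byte, error){rootFunc},
	}
}

func main() {
	bSet := blockSignatureBatch([]byte{0xaa}, []byte{0xbb}, func() ([32]byte, error) { return [32]byte{1}, nil })
	rSet := &signatureBatch{descriptions: []string{"randao signature"}}
	aSet := &signatureBatch{descriptions: []string{"attestation signature"}}
	set := new(signatureBatch).join(bSet).join(rSet).join(aSet)
	fmt.Println(set.descriptions) // [block signature randao signature attestation signature]
}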
View File

@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
if oldestSlotFlagPtr != nil {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).

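The NewSyncNeeds hunk above replaces the slot-based comparison with an epoch-based one when validating the user-supplied oldest slot. A small sketch of the resulting check, with toEpoch and the mainnet MIN_EPOCHS_FOR_BLOCK_REQUESTS value as assumed stand-ins:

package main

import "fmt"

const slotsPerEpoch = 32

// toEpoch is a stand-in for slots.ToEpoch.
func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

// acceptOldestSlotFlag mirrors the post-change condition: the user-supplied
// oldest slot is honored only when its epoch is below the spec minimum block
// retention (MinEpochsForBlockRequests); otherwise the flag is logged and
// ignored, as in the else branch of the hunk.
func acceptOldestSlotFlag(oldestSlotFlag *uint64, minEpochsForBlockRequests uint64) bool {
	if oldestSlotFlag == nil {
		return false
	}
	return toEpoch(*oldestSlotFlag) < minEpochsForBlockRequests
}

func main() {
	slot := uint64(10)
	// 33024 is the mainnet MIN_EPOCHS_FOR_BLOCK_REQUESTS; epoch 0 is below it.
	fmt.Println(acceptOldestSlotFlag(&slot, 33024))
}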
View File

@@ -128,9 +128,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }
@@ -144,7 +141,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",
@@ -178,13 +174,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: &denebSlot,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
@@ -214,9 +210,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot
@@ -239,27 +232,16 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)
// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())
// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)

View File

@@ -38,7 +38,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -25,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/spf13/afero"
"golang.org/x/sync/errgroup"
)
const (
@@ -186,162 +185,73 @@ func (dcs *DataColumnStorage) WarmCache() {
highestStoredEpoch := primitives.Epoch(0)
// List all period directories
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
if err != nil {
log.WithError(err).Error("Error reading top directory during warm cache")
return
}
// Iterate through periods
for _, periodFileInfo := range periodFileInfos {
if !periodFileInfo.IsDir() {
continue
// Walk the data column filesystem to warm up the cache.
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
if fileErr != nil {
return fileErr
}
periodPath := periodFileInfo.Name()
// If not a leaf, skip.
if info.IsDir() {
return nil
}
// List all epoch directories in this period
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
// Extract metadata from the file path.
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
continue
log.WithError(err).Error("Error encountered while extracting file metadata")
return nil
}
// Iterate through epochs
for _, epochFileInfo := range epochFileInfos {
if !epochFileInfo.IsDir() {
continue
}
epochPath := path.Join(periodPath, epochFileInfo.Name())
// List all .sszs files in this epoch
files, err := listEpochFiles(dcs.fs, epochPath)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
continue
}
if len(files) == 0 {
continue
}
// Process all files in this epoch in parallel
epochHighest, err := dcs.processEpochFiles(files)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
}
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
// Open the data column filesystem file.
f, err := dcs.fs.Open(path)
if err != nil {
log.WithError(err).Error("Error encountered while opening data column filesystem file")
return nil
}
// Close the file.
defer func() {
// Overwrite the existing error only if it is nil, since the close error is less important.
closeErr := f.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Read the metadata of the file.
metadata, err := dcs.metadata(f)
if err != nil {
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
return nil
}
// Check the indices.
indices := metadata.indices.all()
if len(indices) == 0 {
return nil
}
// Build the ident.
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
// Update the highest stored epoch.
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
// Set the ident in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
}
return nil
}); err != nil {
log.WithError(err).Error("Error encountered while walking data column filesystem.")
}
// Prune the cache and the filesystem
// Prune the cache and the filesystem.
dcs.prune()
totalElapsed := time.Since(start)
// Log summary
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
}
// listEpochFiles lists all .sszs files in an epoch directory.
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
fileInfos, err := afero.ReadDir(fs, epochPath)
if err != nil {
return nil, errors.Wrap(err, "read epoch directory")
}
files := make([]string, 0, len(fileInfos))
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}
fileName := fileInfo.Name()
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
files = append(files, path.Join(epochPath, fileName))
}
}
return files, nil
}
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
var (
eg errgroup.Group
mu sync.Mutex
)
highestEpoch := primitives.Epoch(0)
for _, filePath := range files {
eg.Go(func() error {
epoch, err := dcs.processFile(filePath)
if err != nil {
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
return nil
}
mu.Lock()
defer mu.Unlock()
highestEpoch = max(highestEpoch, epoch)
return nil
})
}
if err := eg.Wait(); err != nil {
return highestEpoch, err
}
return highestEpoch, nil
}
// processFile processes a single .sszs file.
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
// Extract metadata from the file path
fileMetadata, err := extractFileMetadata(filePath)
if err != nil {
return 0, errors.Wrap(err, "extract file metadata")
}
// Open the file (each goroutine gets its own FD)
f, err := dcs.fs.Open(filePath)
if err != nil {
return 0, errors.Wrap(err, "open file")
}
defer func() {
if closeErr := f.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
}
}()
// Read metadata
metadata, err := dcs.metadata(f)
if err != nil {
return 0, errors.Wrap(err, "read metadata")
}
// Extract indices
indices := metadata.indices.all()
if len(indices) == 0 {
return fileMetadata.epoch, nil // No indices, skip
}
// Build ident and set in cache (thread-safe)
dataColumnsIdent := DataColumnsIdent{
Root: fileMetadata.blockRoot,
Epoch: fileMetadata.epoch,
Indices: indices,
}
if err := dcs.cache.set(dataColumnsIdent); err != nil {
return 0, errors.Wrap(err, "cache set")
}
return fileMetadata.epoch, nil
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
}
// Summary returns the DataColumnStorageSummary.
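For illustration only (not the Prysm implementation), a minimal sketch of the afero.Walk pattern the warm-up now relies on, using an in-memory filesystem; the period/epoch/.sszs layout below mirrors the directory structure described above:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	// Layout assumed here for illustration: <period>/<epoch>/<root>.sszs
	_ = fs.MkdirAll("0/12", 0o755)
	_ = afero.WriteFile(fs, "0/12/0xabc.sszs", []byte("sidecar bytes"), 0o644)

	err := afero.Walk(fs, ".", func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// Skip directories; only leaf files carry sidecar data.
		if info.IsDir() {
			return nil
		}
		if strings.HasSuffix(path, ".sszs") {
			fmt.Println("would warm cache from", path)
		}
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}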

View File

@@ -88,9 +88,6 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
}
}
if uf.Epoch > s.unrealizedFinalizedCheckpoint.Epoch {
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
}
s.unrealizedFinalizedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: uf.Epoch, Root: bytesutil.ToBytes32(uf.Root),
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
@@ -958,13 +957,6 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")

View File

@@ -40,7 +40,6 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {

View File

@@ -3,12 +3,11 @@ package sync
import (
"context"
"fmt"
"slices"
"math"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -193,13 +192,19 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)
select {
case s.dataColumnLogCh <- dataColumnLogEntry{
slot: roDataColumn.Slot(),
index: roDataColumn.Index,
root: roDataColumn.BlockRoot(),
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
Slot: roDataColumn.Slot(),
ColIdx: roDataColumn.Index,
PropIdx: roDataColumn.ProposerIndex(),
BlockRoot: roDataColumn.BlockRoot(),
ParentRoot: roDataColumn.ParentRoot(),
PeerSuffix: pid.String()[len(pid.String())-6:],
PeerGossipScore: peerGossipScore,
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
}:
default:
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
@@ -244,69 +249,68 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
}
type dataColumnLogEntry struct {
slot primitives.Slot
index uint64
root [32]byte
validationTime time.Duration
sinceStartTime time.Duration
Slot primitives.Slot
ColIdx uint64
PropIdx primitives.ValidatorIndex
BlockRoot [32]byte
ParentRoot [32]byte
PeerSuffix string
PeerGossipScore float64
validationTime time.Duration
sinceStartTime time.Duration
}
func (s *Service) processDataColumnLogs() {
ticker := time.NewTicker(1 * time.Second)
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
defer ticker.Stop()
slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
for {
select {
case col := <-s.dataColumnLogCh:
cols := slotStats[col.root]
cols = append(cols, col)
slotStats[col.root] = cols
case entry := <-s.dataColumnLogCh:
cols := slotStats[entry.Slot]
cols[entry.ColIdx] = entry
slotStats[entry.Slot] = cols
case <-ticker.C:
for root, columns := range slotStats {
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)
for slot, columns := range slotStats {
var (
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
peers = make([]string, 0, fieldparams.NumberOfColumns)
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
)
totalReceived := 0
for _, column := range columns {
indices = append(indices, column.index)
sumValidationTime += column.validationTime
sumSinceStartTime += column.sinceStartTime
if totalReceived == 0 {
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
totalReceived++
for _, entry := range columns {
if entry.PeerSuffix == "" {
continue
}
minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
colIndices = append(colIndices, entry.ColIdx)
peers = append(peers, entry.PeerSuffix)
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))
totalReceived++
}
if totalReceived > 0 {
slices.Sort(indices)
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)
log.WithFields(logrus.Fields{
"slot": columns[0].slot,
"root": fmt.Sprintf("%#x", root),
"count": totalReceived,
"indices": helpers.PrettySlice(indices),
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
}).Debug("Accepted data column sidecars summary")
}
log.WithFields(logrus.Fields{
"slot": slot,
"receivedCount": totalReceived,
"columnIndices": colIndices,
"peers": peers,
"gossipScores": gossipScores,
"validationTimes": validationTimes,
"sinceStartTimes": sinceStartTimes,
}).Debug("Accepted data column sidecars summary")
}
slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
}
}
}
func prettyMinMaxAverage(min, max, average time.Duration) string {
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
func roundFloat(f float64, decimals int) float64 {
mult := math.Pow(10, float64(decimals))
return math.Round(f*mult) / mult
}
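One detail worth calling out in the aggregation loop above: slotStats maps a slot to a fixed-size array, and a Go array held in a map is a value, which is why each entry is copied out, modified, and written back. A minimal standalone sketch (not Prysm code):

package main

import "fmt"

type logEntry struct{ peerSuffix string }

func main() {
	const numberOfColumns = 4
	slotStats := make(map[uint64][numberOfColumns]logEntry)

	// slotStats[1][2] = logEntry{"abc123"} would not compile: a map element is not addressable.
	cols := slotStats[1]         // copy the array out (zero value if the slot is absent)
	cols[2] = logEntry{"abc123"} // modify the copy
	slotStats[1] = cols          // store it back

	fmt.Println(slotStats[1][2].peerSuffix)
}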

View File

@@ -687,12 +687,6 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
}}
}
func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return st, nil
}}
}
func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
return sbrForValOverrideWithT(nil, idx, val)
}

View File

@@ -11,10 +11,12 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -482,19 +484,88 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e
defer dv.recordResult(RequireSidecarProposerExpected, &err)
for _, dataColumn := range dv.dataColumns {
dataColumnSlot := dataColumn.Slot()
type slotParentRoot struct {
slot primitives.Slot
parentRoot [fieldparams.RootLength]byte
}
// Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "verifying state"))
targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)
var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
// Use cached values if available.
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
return root, nil
}
// Use proposer lookahead directly
idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(slot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}
// Compute the target root for the epoch.
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
}
// Store the target root in the cache.
targetRootBySlotParentRoot[slotParentRoot] = targetRoot
return targetRoot, nil
}
for _, dataColumn := range dv.dataColumns {
// Extract the slot of the data column.
dataColumnSlot := dataColumn.Slot()
// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()
// Compute the target root for the data column.
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "target root"))
}
// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}
// Create a checkpoint for the target root.
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}
// Try to extract the proposer index from the data column in the cache.
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)
if !cached {
parentRoot := dataColumn.ParentRoot()
// Ensure the expensive index computation is only performed once for
// concurrent requests for the same signature data.
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
}
idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
}
return idx, nil
})
if err != nil {
return err
}
var ok bool
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
}
}
if idx != dataColumn.ProposerIndex() {
@@ -555,3 +626,7 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {
return sha256.Sum256(unhashedKey), nil
}
func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
return string(root[:]) + fmt.Sprintf("%d", slot)
}
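The dv.sg.Do call above matches the three-value signature of golang.org/x/sync/singleflight (an assumption, since the field's type is not shown in this diff). A minimal sketch of that dedup pattern, with the proposer computation replaced by a stand-in:

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var group singleflight.Group
	var calls int
	var mu sync.Mutex

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Every caller uses the same key, analogous to parentRoot||slot above.
			idx, err, shared := group.Do("parent-root|slot", func() (any, error) {
				mu.Lock()
				calls++
				mu.Unlock()
				time.Sleep(10 * time.Millisecond) // stand-in for the expensive proposer computation
				return uint64(42), nil
			})
			fmt.Println(idx, err, shared)
		}()
	}
	wg.Wait()
	fmt.Println("expensive computation ran", calls, "time(s)") // typically once for concurrent callers
}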

View File

@@ -2,6 +2,7 @@ package verification
import (
"reflect"
"sync"
"testing"
"time"
@@ -794,90 +795,87 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
blobCount = 1
)
ctx := t.Context()
parentRoot := [fieldparams.RootLength]byte{}
// Create a Fulu state to get the expected proposer from the lookahead.
fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
expectedProposer, err := fuluState.ProposerLookahead()
require.NoError(t, err)
expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])
// Generate data columns with the expected proposer index.
matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
// Generate data columns with wrong proposer index.
wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)
t.Run("Proposer matches", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
firstColumn := columns[0]
ctx := t.Context()
testCases := []struct {
name string
stateByRooter StateByRooter
proposerCache proposerCache
columns []blocks.RODataColumn
error string
}{
{
name: "Cached, matches",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
},
}
verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
})
t.Run("Proposer does not match", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
columns: columns,
},
{
name: "Cached, does not match",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
},
}
verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})
t.Run("State lookup failure", func(t *testing.T) {
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
initializer := Initializer{
shared: &sharedResources{
sr: sbrNotFound(t, columns[0].ParentRoot()),
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{},
columns: columns,
error: errSidecarUnexpectedProposer.Error(),
},
{
name: "Not cached, state lookup failure",
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
},
}
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, "verifying state", err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})
}
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
blobs := make([]kzg.Blob, 0, len(roBlobs))
for i := range roBlobs {
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
columns: columns,
error: "verifying state",
},
}
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: tc.stateByRooter,
pc: tc.proposerCache,
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
return roDataColumnSidecars
verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
var wg sync.WaitGroup
var err1, err2 error
wg.Go(func() {
err1 = verifier.SidecarProposerExpected(ctx)
})
wg.Go(func() {
err2 = verifier.SidecarProposerExpected(ctx)
})
wg.Wait()
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
if len(tc.error) > 0 {
require.ErrorContains(t, tc.error, err1)
require.ErrorContains(t, tc.error, err2)
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
return
}
require.NoError(t, err1)
require.NoError(t, err2)
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
})
}
}
func TestColumnRequirementSatisfaction(t *testing.T) {
@@ -924,3 +922,12 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
require.NoError(t, err)
}
func TestConcatRootSlot(t *testing.T) {
root := [fieldparams.RootLength]byte{1, 2, 3}
const slot = primitives.Slot(3210)
const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"
actual := concatRootSlot(root, slot)
require.Equal(t, expected, actual)
}

View File

@@ -1,3 +0,0 @@
### Added
- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.

View File

@@ -1,2 +0,0 @@
#### Fixed
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.

View File

@@ -1,3 +0,0 @@
### Changed
- Data column sidecars cache warmup: Process all sidecars for a given epoch in parallel.

View File

@@ -1,3 +0,0 @@
### Changed
- Summarize the DEBUG log for data column sidecars received via gossip.

View File

@@ -1,2 +0,0 @@
### Changed
- Use lookahead to validate data column sidecar proposer index.

View File

@@ -1,3 +0,0 @@
### Changed
- Notify the engine about forkchoice updates in the background.

View File

@@ -1,2 +0,0 @@
### Changed
- Use a separate context when updating the slot cache.

View File

@@ -1,3 +0,0 @@
### Fixed
- Do not process slots and copy states for next-epoch proposers after Fulu.

View File

@@ -1,2 +0,0 @@
### Ignored
- Do not send FCU on block batches.

View File

@@ -1,3 +0,0 @@
### Changed
- Do not check block signature on state transition.

View File

@@ -1,2 +0,0 @@
### Added
- Add feature flag to use hashtree instead of gohashtree.

View File

@@ -1,3 +0,0 @@
### Added
- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint.

View File

@@ -1,3 +0,0 @@
### Changed
- Extend `httperror` analyzer to more functions.

View File

@@ -1,3 +0,0 @@
### Fixed
- Avoid panic when the fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175)

View File

@@ -1,3 +0,0 @@
### Changed
- Performance improvement in ProcessConsolidationRequests: use the more performant HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw, since there is no need to calculate the full total pending balance.

View File

@@ -0,0 +1,3 @@
### Fixed
- Removed redundant justified checkpoint update in pullTips.

View File

@@ -73,7 +73,6 @@ type Flags struct {
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
EnableHashtree bool // Enables usage of the hashtree library for hashing
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
@@ -240,10 +239,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(enableFullSSZDataLogging)
cfg.EnableFullSSZDataLogging = true
}
if ctx.IsSet(enableHashtree.Name) {
logEnabled(enableHashtree)
cfg.EnableHashtree = true
}
cfg.EnableVerboseSigVerification = true
if ctx.IsSet(disableVerboseSigVerification.Name) {
logEnabled(disableVerboseSigVerification)

View File

@@ -133,10 +133,6 @@ var (
Name: "enable-beacon-rest-api",
Usage: "(Experimental): Enables of the beacon REST API when querying a beacon node.",
}
enableHashtree = &cli.BoolFlag{
Name: "enable-hashtree",
Usage: "(Experimental): Enables the hashthree hashing library.",
}
disableVerboseSigVerification = &cli.BoolFlag{
Name: "disable-verbose-sig-verification",
Usage: "Disables identifying invalid signatures if batch verification fails when processing block.",
@@ -276,7 +272,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
enableHashtree,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -20,7 +20,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
@@ -35,11 +34,9 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/version:go_default_library",
"@com_github_offchainlabs_hashtree//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -1,8 +1,6 @@
package blocks
import (
"github.com/OffchainLabs/hashtree"
"github.com/OffchainLabs/prysm/v7/config/features"
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -11,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/gohashtree"
"github.com/sirupsen/logrus"
)
const (
@@ -48,14 +45,7 @@ func VerifyKZGInclusionProof(blob ROBlob) error {
return errInvalidBodyRoot
}
chunks := makeChunk(blob.KzgCommitment)
if features.Get().EnableHashtree {
err := hashtree.Hash(chunks, chunks)
if err != nil {
return err
}
} else {
gohashtree.HashChunks(chunks, chunks)
}
gohashtree.HashChunks(chunks, chunks)
verified := trie.VerifyMerkleProof(root, chunks[0][:], blob.Index+KZGOffset, blob.CommitmentInclusionProof)
if !verified {
return errInvalidInclusionProof
@@ -192,14 +182,7 @@ func LeavesFromCommitments(commitments [][]byte) [][]byte {
leaves := make([][]byte, len(commitments))
for i, kzg := range commitments {
chunk := makeChunk(kzg)
if features.Get().EnableHashtree {
err := hashtree.Hash(chunk, chunk)
if err != nil {
logrus.WithError(err).Error("Could not hash KZG commitment chunk")
}
} else {
gohashtree.HashChunks(chunk, chunk)
}
gohashtree.HashChunks(chunk, chunk)
leaves[i] = chunk[0][:]
}
return leaves

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"basis_points.go",
"builder_index.go",
"committee_bits_mainnet.go",
"committee_bits_minimal.go", # keep
"committee_index.go",
@@ -32,7 +31,6 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"builder_index_test.go",
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",

View File

@@ -1,54 +0,0 @@
package primitives
import (
"fmt"
fssz "github.com/prysmaticlabs/fastssz"
)
var _ fssz.HashRoot = (BuilderIndex)(0)
var _ fssz.Marshaler = (*BuilderIndex)(nil)
var _ fssz.Unmarshaler = (*BuilderIndex)(nil)
// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64
// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith appends the SSZ uint64 representation of the index to the given hasher.
func (b BuilderIndex) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint64(uint64(b))
return nil
}
// UnmarshalSSZ decodes the SSZ-encoded uint64 index from buf.
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
if len(buf) != b.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", b.SizeSSZ(), len(buf))
}
*b = BuilderIndex(fssz.UnmarshallUint64(buf))
return nil
}
// MarshalSSZTo appends the SSZ-encoded index to dst and returns the extended buffer.
func (b *BuilderIndex) MarshalSSZTo(dst []byte) ([]byte, error) {
marshalled, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalled...), nil
}
// MarshalSSZ encodes the index as an SSZ uint64.
func (b *BuilderIndex) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint64([]byte{}, uint64(*b))
return marshalled, nil
}
// SizeSSZ returns the size of the SSZ-encoded index in bytes.
func (b *BuilderIndex) SizeSSZ() int {
return 8
}

View File

@@ -1,86 +0,0 @@
package primitives_test
import (
"encoding/binary"
"slices"
"strconv"
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestBuilderIndex_SSZRoundTripAndHashRoot(t *testing.T) {
cases := []uint64{
0,
1,
42,
(1 << 32) - 1,
1 << 32,
^uint64(0),
}
for _, v := range cases {
t.Run("v="+u64name(v), func(t *testing.T) {
t.Parallel()
val := primitives.BuilderIndex(v)
require.Equal(t, 8, (&val).SizeSSZ())
enc, err := (&val).MarshalSSZ()
require.NoError(t, err)
require.Equal(t, 8, len(enc))
wantEnc := make([]byte, 8)
binary.LittleEndian.PutUint64(wantEnc, v)
require.DeepEqual(t, wantEnc, enc)
dstPrefix := []byte("prefix:")
dst, err := (&val).MarshalSSZTo(slices.Clone(dstPrefix))
require.NoError(t, err)
wantDst := append(dstPrefix, wantEnc...)
require.DeepEqual(t, wantDst, dst)
var decoded primitives.BuilderIndex
require.NoError(t, (&decoded).UnmarshalSSZ(enc))
require.Equal(t, val, decoded)
root, err := val.HashTreeRoot()
require.NoError(t, err)
var wantRoot [32]byte
binary.LittleEndian.PutUint64(wantRoot[:8], v)
require.Equal(t, wantRoot, root)
})
}
}
func TestBuilderIndex_UnmarshalSSZRejectsWrongSize(t *testing.T) {
for _, size := range []int{7, 9} {
t.Run("size="+strconv.Itoa(size), func(t *testing.T) {
t.Parallel()
var v primitives.BuilderIndex
err := (&v).UnmarshalSSZ(make([]byte, size))
require.ErrorContains(t, "expected buffer of length 8", err)
})
}
}
func u64name(v uint64) string {
switch v {
case 0:
return "0"
case 1:
return "1"
case 42:
return "42"
case (1 << 32) - 1:
return "2^32-1"
case 1 << 32:
return "2^32"
case ^uint64(0):
return "max"
default:
return "custom"
}
}

View File

@@ -5,11 +5,7 @@ go_library(
srcs = ["hashtree.go"],
importpath = "github.com/OffchainLabs/prysm/v7/crypto/hash/htr",
visibility = ["//visibility:public"],
deps = [
"//config/features:go_default_library",
"@com_github_offchainlabs_hashtree//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
],
deps = ["@com_github_prysmaticlabs_gohashtree//:go_default_library"],
)
go_test(
@@ -17,8 +13,5 @@ go_test(
size = "small",
srcs = ["hashtree_test.go"],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//testing/require:go_default_library",
],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -4,8 +4,6 @@ import (
"runtime"
"sync"
"github.com/OffchainLabs/hashtree"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/prysmaticlabs/gohashtree"
)
@@ -13,12 +11,7 @@ const minSliceSizeToParallelize = 5000
func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGroup) {
defer wg.Done()
var err error
if features.Get().EnableHashtree {
err = hashtree.Hash(outputList, inputList)
} else {
err = gohashtree.Hash(outputList, inputList)
}
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
@@ -32,16 +25,9 @@ func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGrou
func VectorizedSha256(inputList [][32]byte) [][32]byte {
outputList := make([][32]byte, len(inputList)/2)
if len(inputList) < minSliceSizeToParallelize {
if features.Get().EnableHashtree {
err := hashtree.Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
} else {
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
err := gohashtree.Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
return outputList
}
@@ -52,12 +38,7 @@ func VectorizedSha256(inputList [][32]byte) [][32]byte {
for j := range n {
go hashParallel(inputList[j*2*groupSize:(j+1)*2*groupSize], outputList[j*groupSize:], &wg)
}
var err error
if features.Get().EnableHashtree {
err = hashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
} else {
err = gohashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
}
err := gohashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
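For reference, a minimal sketch of the gohashtree call shape kept by this change (assuming the prysmaticlabs/gohashtree API used above): each output digest covers one pair of adjacent 32-byte chunks, so the output slice is half the length of the input.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/gohashtree"
)

func main() {
	chunks := make([][32]byte, 4)  // two pairs of 32-byte leaves
	digests := make([][32]byte, 2) // one SHA-256 digest per pair

	if err := gohashtree.Hash(digests, chunks); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n%x\n", digests[0], digests[1])
}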

View File

@@ -4,7 +4,6 @@ import (
"sync"
"testing"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
@@ -24,25 +23,3 @@ func Test_VectorizedSha256(t *testing.T) {
require.Equal(t, r, hash2[i])
}
}
func Test_VectorizedSha256_hashtree_enabled(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableHashtree: true,
})
defer resetCfg()
largeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
secondLargeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
hash1 := make([][32]byte, 16*minSliceSizeToParallelize)
wg := sync.WaitGroup{}
wg.Go(func() {
tempHash := VectorizedSha256(largeSlice)
copy(hash1, tempHash)
})
wg.Wait()
hash2 := VectorizedSha256(secondLargeSlice)
require.Equal(t, len(hash1), len(hash2))
for i, r := range hash1 {
require.Equal(t, r, hash2[i])
}
}

View File

@@ -2449,15 +2449,6 @@ def prysm_deps():
sum = "h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=",
version = "v1.4.11",
)
go_repository(
name = "com_github_offchainlabs_hashtree",
build_file_generation = "off",
importpath = "github.com/OffchainLabs/hashtree",
patch_args = ["-p1"],
patches = ["//third_party:com_github_offchainlabs_hashtree.patch"],
sum = "h1:Ws38dQSEtTbRihiAnqoV5HHubSC/zaxE4Yq3j2Z8S7Y=",
version = "v0.2.1",
)
go_repository(
name = "com_github_oklog_oklog",
importpath = "github.com/oklog/oklog",

View File

@@ -12,7 +12,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/encoding/ssz",
visibility = ["//visibility:public"],
deps = [
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash/htr:go_default_library",
@@ -20,7 +19,6 @@ go_library(
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_minio_sha256_simd//:go_default_library",
"@com_github_offchainlabs_hashtree//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",

View File

@@ -3,8 +3,6 @@ package ssz
import (
"encoding/binary"
"github.com/OffchainLabs/hashtree"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/pkg/errors"
@@ -184,12 +182,7 @@ func MerkleizeListSSZ[T Hashable](elements []T, limit uint64) ([32]byte, error)
chunks := make([][32]byte, 2)
chunks[0] = body
binary.LittleEndian.PutUint64(chunks[1][:], uint64(len(elements)))
if features.Get().EnableHashtree {
err = hashtree.Hash(chunks, chunks)
} else {
err = gohashtree.Hash(chunks, chunks)
}
if err != nil {
if err := gohashtree.Hash(chunks, chunks); err != nil {
return [32]byte{}, err
}
return chunks[0], err

go.mod
View File

@@ -6,7 +6,6 @@ require (
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d
github.com/MariusVanDerWijden/tx-fuzz v1.4.0
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506
github.com/OffchainLabs/hashtree v0.2.1
github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
github.com/bazelbuild/rules_go v0.23.2
github.com/btcsuite/btcd/btcec/v2 v2.3.4

go.sum
View File

@@ -59,8 +59,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506 h1:d/SJkN8/9Ca+1YmuDiUJxAiV4w/a9S8NcsG7GMQSrVI=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506/go.mod h1:6TZI4FU6zT8x6ZfWa1J8YQ2NgW0wLV/W3fHRca8ISBo=
github.com/OffchainLabs/hashtree v0.2.1 h1:Ws38dQSEtTbRihiAnqoV5HHubSC/zaxE4Yq3j2Z8S7Y=
github.com/OffchainLabs/hashtree v0.2.1/go.mod h1:b07+cRZs+eAR8TR57CB9TQlt5Gnl/06Xs76xt/1wq0M=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=

View File

@@ -43,7 +43,6 @@ func WriteJson(w http.ResponseWriter, v any) {
func WriteSsz(w http.ResponseWriter, respSsz []byte) {
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
w.WriteHeader(http.StatusOK)
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
log.WithError(err).Error("Could not write response message")
}

View File

@@ -44,13 +44,6 @@ func WithProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, valRoo
}
}
// WithProposer sets the proposer index for the generated block without signing.
func WithProposer(idx primitives.ValidatorIndex) DenebBlockGeneratorOption {
return func(g *denebBlockGenerator) {
g.proposer = idx
}
}
func WithPayloadSetter(p *enginev1.ExecutionPayloadDeneb) DenebBlockGeneratorOption {
return func(g *denebBlockGenerator) {
g.payload = p

View File

@@ -1,43 +0,0 @@
diff -urN a/BUILD.bazel b/BUILD.bazel
--- a/BUILD.bazel 1969-12-31 18:00:00.000000000 -0600
+++ b/BUILD.bazel 2025-01-05 12:00:00.000000000 -0600
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "bindings.go",
+ "sha256_1_generic.go",
+ ] + select({
+ "@io_bazel_rules_go//go/platform:linux_amd64": [
+ "bindings_amd64.go",
+ "wrapper_linux_amd64.s",
+ "hashtree_amd64.syso",
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_amd64": [
+ "bindings_amd64.go",
+ "wrapper_linux_amd64.s",
+ "hashtree_amd64.syso",
+ ],
+ "@io_bazel_rules_go//go/platform:windows_amd64": [
+ "bindings_amd64.go",
+ "wrapper_windows_amd64.s",
+ "hashtree_windows_amd64.syso",
+ ],
+ "@io_bazel_rules_go//go/platform:linux_arm64": [
+ "bindings_arm64.go",
+ "wrapper_arm64.s",
+ "hashtree_linux_arm64.syso",
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_arm64": [
+ "bindings_arm64.go",
+ "wrapper_arm64.s",
+ "hashtree_darwin_arm64.syso",
+ ],
+ "//conditions:default": [],
+ }),
+ importpath = "github.com/OffchainLabs/hashtree",
+ visibility = ["//visibility:public"],
+ deps = ["@com_github_klauspost_cpuid_v2//:go_default_library"],
+)

View File

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["analyzer.go"],
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httpwriter",
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httperror",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_default_library",

View File

@@ -1,4 +1,4 @@
package httpwriter
package httperror
import (
"go/ast"
@@ -9,8 +9,8 @@ import (
)
var Analyzer = &analysis.Analyzer{
Name: "httpwriter",
Doc: "Ensures that httputil functions which make use of the writer are immediately followed by a return statement.",
Name: "httperror",
Doc: "Ensures calls to httputil.HandleError are immediately followed by a return statement.",
Requires: []*analysis.Analyzer{
inspect.Analyzer,
},
@@ -99,7 +99,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
// Now check the current statement itself: is it (or does it contain) a direct call to httputil.HandleError?
// We only consider ExprStmt that are direct CallExpr to httputil.HandleError.
call, name := findHandleErrorCall(stmt)
call := findHandleErrorCall(stmt)
if call == nil {
continue
}
@@ -121,7 +121,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
continue
}
// otherwise it's not a return (even if it's an if/for etc) -> violation
pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be immediately followed by a return statement")
continue
}
@@ -133,33 +133,31 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
}
// Non-void function and it's the last statement → violation
pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be followed by return because function has return values")
}
}
// findHandleErrorCall returns the call expression if stmt is a direct call to httputil.HandleError(...),
// otherwise nil. We only match direct ExprStmt -> CallExpr -> SelectorExpr where selector is httputil.HandleError.
func findHandleErrorCall(stmt ast.Stmt) (*ast.CallExpr, string) {
func findHandleErrorCall(stmt ast.Stmt) *ast.CallExpr {
es, ok := stmt.(*ast.ExprStmt)
if !ok {
return nil, ""
return nil
}
call, ok := es.X.(*ast.CallExpr)
if !ok {
return nil, ""
return nil
}
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return nil, ""
return nil
}
pkgIdent, ok := sel.X.(*ast.Ident)
if !ok {
return nil, ""
return nil
}
selectorName := sel.Sel.Name
if pkgIdent.Name == "httputil" &&
(selectorName == "HandleError" || selectorName == "WriteError" || selectorName == "WriteJson" || selectorName == "WriteSSZ") {
return call, selectorName
if pkgIdent.Name == "httputil" && sel.Sel.Name == "HandleError" {
return call
}
return nil, ""
return nil
}
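For illustration only, a sketch of the pattern the narrowed analyzer enforces; the handler, the loadData helper, and the httputil import path and HandleError signature are assumptions based on common Prysm usage, not taken from this diff:

package rpc

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v7/network/httputil"
)

func handler(w http.ResponseWriter, r *http.Request) {
	data, err := loadData(r)
	if err != nil {
		httputil.HandleError(w, "could not load data: "+err.Error(), http.StatusInternalServerError)
		return // the analyzer reports a HandleError call that is not immediately followed by a return
	}
	httputil.WriteJson(w, data)
}

// loadData is a hypothetical helper used only for this illustration.
func loadData(_ *http.Request) (any, error) {
	return struct{}{}, nil
}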