Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)
Compare commits: v7.1.1...kzg-verifi (19 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 6397093627 | |
| | 0db74365e0 | |
| | 6f90101364 | |
| | 49e1763ec2 | |
| | c2527c82cd | |
| | d4ea8fafd6 | |
| | 07d1d6bdf9 | |
| | f938da99d9 | |
| | 9deec69cc7 | |
| | 2767f08f4d | |
| | d46c620783 | |
| | dd05e44ef3 | |
| | 9da36a5de6 | |
| | 7950a24926 | |
| | ea51253be9 | |
| | 2ac30f5ce6 | |
| | 7418c00ad6 | |
| | 66342655fd | |
| | 18eca953c1 | |
@@ -193,7 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
if e == stateEpoch {
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {

@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch {
if e > stateEpoch && !fuluAndNextEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {
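The condition change above is easy to misread in the flattened diff. Below is a minimal, self-contained sketch of just the new rule, with stand-in types and version constants rather than Prysm's real `version` and `primitives` packages; the apparent intent is that from Fulu onward the tracked-proposer lookup can also be served when the requested slot sits in the epoch immediately after the state's epoch, without first advancing the state.

```go
package main

import "fmt"

// Stand-in type and constants; Prysm's primitives.Epoch and version.Fulu are
// not imported here, and the numeric ordering is illustrative only.
type Epoch uint64

const (
	versionElectra = 6
	versionFulu    = 7
)

// canLookupProposerFromState mirrors the shape of the changed condition: the
// tracked-proposer lookup is attempted when the requested slot's epoch equals
// the state's epoch, or (from Fulu on) when it is exactly one epoch ahead.
func canLookupProposerFromState(stateVersion int, e, stateEpoch Epoch) bool {
	fuluAndNextEpoch := stateVersion >= versionFulu && e == stateEpoch+1
	return e == stateEpoch || fuluAndNextEpoch
}

func main() {
	fmt.Println(canLookupProposerFromState(versionElectra, 11, 10)) // false: next epoch, pre-Fulu
	fmt.Println(canLookupProposerFromState(versionFulu, 11, 10))    // true: next epoch, Fulu
	fmt.Println(canLookupProposerFromState(versionFulu, 12, 10))    // false: two epochs ahead
}
```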
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}

func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx

st, _ := util.DeterministicGenesisStateBellatrix(t, 1)

service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

@@ -12,6 +12,7 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

@@ -53,58 +54,53 @@ type fcuConfig struct {
}

// sendFCU handles the logic to notify the engine of a forckhoice update
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions .
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}

// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
}

// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
log.WithError(err).Error("Could not notify forkchoice update")
}

if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}

// shouldOverrideFCU checks whether the incoming block is still subject to being
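The redefined `forkchoiceUpdateWithExecution` now takes the forkchoice lock itself, and its doc comment states that callers should not hold that lock. The sketch below illustrates only that locking contract, using a plain `sync.Mutex` and invented names; it is not Prysm's API.

```go
package main

import (
	"fmt"
	"sync"
)

// Minimal stand-ins: a service with a forkchoice lock and an FCU call that
// acquires the lock internally, mirroring the contract in the diff.
type service struct {
	forkchoiceMu sync.Mutex
}

// forkchoiceUpdate locks forkchoice itself, so callers must NOT hold the lock.
func (s *service) forkchoiceUpdate() {
	s.forkchoiceMu.Lock()
	defer s.forkchoiceMu.Unlock()
	fmt.Println("notified engine under forkchoice lock")
}

// onBlock shows the safe calling pattern: any work that needs the lock is
// finished and the lock released before forkchoiceUpdate is invoked
// (sync.Mutex is not re-entrant, so calling it while holding the lock would
// deadlock).
func (s *service) onBlock() {
	s.forkchoiceMu.Lock()
	// ... mutate forkchoice state here ...
	s.forkchoiceMu.Unlock()

	s.forkchoiceUpdate()
}

func main() {
	(&service{}).onBlock()
}
```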
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)

payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)

@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)
}

func TestShouldOverrideFCU(t *testing.T) {

@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.

@@ -30,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}

@@ -43,7 +45,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
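The new comment in `getRecentPreState` is dense; here is a worked example of the epoch arithmetic it describes, using plain integers rather than Prysm's `primitives.Epoch`.

```go
package main

import "fmt"

// dependentRootEpoch is the single epoch at which the changed code compares
// the head's and the target checkpoint's dependent roots. Stand-in sketch
// only; plain uint64 replaces Prysm's primitives.Epoch.
func dependentRootEpoch(headEpoch uint64) uint64 {
	return headEpoch - 1
}

func main() {
	headEpoch := uint64(10)
	e := dependentRootEpoch(headEpoch) // 9

	// Checkpoint from the current epoch (c.Epoch == 10): e == c.Epoch - 1 == 9.
	// Checkpoint from the previous epoch (c.Epoch == 9): e == c.Epoch == 9.
	// The old lookup at c.Epoch-1 would be epoch 8 in the second case, which
	// forkchoice may not be able to answer for the head; epoch 9 it always can.
	fmt.Println("comparing dependent roots at epoch", e)
}
```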
@@ -170,12 +170,13 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))

@@ -197,12 +198,13 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

@@ -227,6 +229,7 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)

@@ -235,8 +238,9 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {

service.head = &head{
root: [32]byte{'T'},
state: s,
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}

@@ -263,6 +267,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)

@@ -270,7 +275,8 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'T'},
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}

@@ -287,12 +293,13 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

@@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}

@@ -105,14 +102,17 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
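The new Fulu branch in `postBlockProcess` launches the cache update in a goroutine whose deadline derives from the long-lived service context rather than the per-block context, so the work is not canceled when block processing returns. A small, self-contained sketch of that pattern, with invented names and a made-up deadline:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// svcCtx stands in for the service-lifetime context; slotDeadline and the
	// update function are placeholders for illustration only.
	svcCtx := context.Background()
	slotDeadline := 200 * time.Millisecond

	updateCaches := func(ctx context.Context) {
		select {
		case <-time.After(50 * time.Millisecond):
			fmt.Println("caches updated")
		case <-ctx.Done():
			fmt.Println("gave up:", ctx.Err())
		}
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Deadline is derived from the long-lived context, not the caller's.
		ctx, cancel := context.WithTimeout(svcCtx, slotDeadline)
		defer cancel()
		updateCaches(ctx)
	}()
	<-done // only here so the example terminates deterministically
}
```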
@@ -295,14 +295,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

@@ -330,6 +322,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}

// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {

@@ -359,7 +352,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil

@@ -372,7 +367,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()

@@ -912,8 +907,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return

@@ -926,14 +919,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
}
// return early if we already started building a block for the current
// head root

@@ -963,6 +974,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
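In the Fulu branch of `lateBlockTasks`, the cache update is wrapped as `defer func() { go func() { ... }() }()`: it is only scheduled when the function returns (after the forkchoice update work) and then runs asynchronously. A toy illustration of that ordering, with placeholder names:

```go
package main

import (
	"fmt"
	"sync"
)

// lateBlockTasks sketches the defer-wrapping-a-goroutine pattern: the deferred
// closure fires on return, and only then launches the background cache update.
func lateBlockTasks(wg *sync.WaitGroup) {
	defer func() {
		go func() {
			defer wg.Done()
			fmt.Println("3. caches updated in the background")
		}()
	}()

	fmt.Println("1. payload attributes computed")
	fmt.Println("2. forkchoice update sent to the engine")
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	lateBlockTasks(&wg)
	wg.Wait()
}
```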
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {

@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
log.WithError(err).Error("Could not update next slot state cache")
return
}
if !slots.IsEpochEnd(slot) {
return nil
return
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}

@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
@@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -816,25 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}

@@ -866,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}

@@ -1339,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1351,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1363,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1375,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1400,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}

// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")

parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)

// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
// not finish importing) one and that the node is optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)

// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}

// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
@@ -1642,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {

@@ -1662,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 12; i < 18; i++ {

@@ -1684,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()

@@ -1708,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

@@ -1718,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}

@@ -1768,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic

@@ -1835,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {

@@ -1856,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

// import the merge block

@@ -1877,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH

@@ -1906,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()

@@ -1975,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)

@@ -2000,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)

@@ -2028,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()

@@ -2072,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")

for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)

@@ -2089,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {

@@ -2109,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

// import the merge block

@@ -2130,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH

@@ -2161,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -2282,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()

st, err = service.HeadState(ctx)
require.NoError(t, err)

@@ -2348,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()

st, err = service.HeadState(ctx)
require.NoError(t, err)

@@ -2631,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)

// Rollback block insertion into db and caches.
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)

// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))

@@ -2732,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()

b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)

@@ -2766,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, postState.SetFinalizedCheckpoint(cj))

// Rollback block insertion into db and caches.
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, "context canceled", err)

// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))

@@ -3262,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
}

// Execute postBlockProcess
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(cfg)
service.cfg.ForkChoiceStore.Unlock()

// Check error expectation
if tt.expectError {
@@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
bal, err := st.PendingBalanceToWithdraw(srcIdx)
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if bal > 0 {
if hasBal {
continue
}

@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}

sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {

@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(

// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)
set.Join(rSet).Join(aSet)

if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
}

func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {

@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl

// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)

if oldestSlotFlagPtr != nil {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).

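A hedged sketch of the clamp described by the comment in this hunk: the backfill oldest-slot flag is honored only when it asks for more history than the spec-minimum block-retention window. All names below (validOldestSlot, specMinEpochs, slotsPerEpoch) are illustrative stand-ins, not Prysm APIs.

package syncneeds

// validOldestSlot returns the flag only if it points at or before the earliest
// slot the node must retain anyway; otherwise the spec minimum wins.
func validOldestSlot(flag *uint64, currentSlot, slotsPerEpoch, specMinEpochs uint64) *uint64 {
	if flag == nil {
		return nil
	}
	// The horizon is the oldest slot covered by the spec-minimum retention window.
	horizon := uint64(0)
	if currentSlot > specMinEpochs*slotsPerEpoch {
		horizon = currentSlot - specMinEpochs*slotsPerEpoch
	}
	if *flag <= horizon {
		return flag // the flag requests more history than required, so honor it
	}
	return nil // the flag is ignored and the spec minimum applies
}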
@@ -128,6 +128,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
|
||||
|
||||
currentSlot := primitives.Slot(10000)
|
||||
currentFunc := func() primitives.Slot { return currentSlot }
|
||||
@@ -141,6 +144,7 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedCol primitives.Epoch
|
||||
name string
|
||||
input SyncNeeds
|
||||
current func() primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "basic initialization with no flags",
|
||||
@@ -174,13 +178,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(10)
|
||||
return &slot
|
||||
}(),
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
|
||||
@@ -210,6 +214,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "both blob retention flag and oldest slot set",
|
||||
blobRetentionFlag: minBlobEpochs + 5,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(100)
|
||||
return &slot
|
||||
@@ -232,16 +239,27 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedBlob: 5000,
|
||||
expectedCol: 5000,
|
||||
},
|
||||
{
|
||||
name: "regression for deneb start",
|
||||
blobRetentionFlag: 8212500,
|
||||
expectValidOldest: true,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
expectedBlob: 8212500,
|
||||
expectedCol: 8212500,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
if tc.current == nil {
|
||||
tc.current = currentFunc
|
||||
}
|
||||
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that current, deneb, fulu are set correctly
|
||||
require.Equal(t, currentSlot, result.current())
|
||||
|
||||
// Check retention calculations
|
||||
require.Equal(t, tc.expectedBlob, result.blobRetention)
|
||||
require.Equal(t, tc.expectedCol, result.colRetention)
|
||||
|
||||
@@ -38,6 +38,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -185,73 +186,162 @@ func (dcs *DataColumnStorage) WarmCache() {
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open the data column filesystem file.
|
||||
f, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := f.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem.
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := listEpochFiles(dcs.fs, epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
epochHighest, err := dcs.processEpochFiles(files)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
|
||||
}
|
||||
|
||||
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
dcs.prune()
|
||||
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
totalElapsed := time.Since(start)
|
||||
|
||||
// Log summary
|
||||
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// listEpochFiles lists all .sszs files in an epoch directory.
|
||||
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read epoch directory")
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
|
||||
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
for _, filePath := range files {
|
||||
eg.Go(func() error {
|
||||
epoch, err := dcs.processFile(filePath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return highestEpoch, err
|
||||
}
|
||||
|
||||
return highestEpoch, nil
|
||||
}
|
||||
|
||||
// processFile processes a single .sszs file.
|
||||
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "extract file metadata")
|
||||
}
|
||||
|
||||
// Open the file (each goroutine gets its own FD)
|
||||
f, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "open file")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read metadata
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "read metadata")
|
||||
}
|
||||
|
||||
// Extract indices
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return fileMetadata.epoch, nil // No indices, skip
|
||||
}
|
||||
|
||||
// Build ident and set in cache (thread-safe)
|
||||
dataColumnsIdent := DataColumnsIdent{
|
||||
Root: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
Indices: indices,
|
||||
}
|
||||
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return 0, errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return fileMetadata.epoch, nil
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary.
|
||||
|
||||
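The warm-cache rewrite above replaces a single recursive filesystem walk with a per-epoch fan-out: list the .sszs files of an epoch, process them concurrently, and fold the highest epoch seen under a mutex. A minimal, self-contained sketch of that concurrency shape, with the file parsing stubbed out (parse is a placeholder, not a Prysm function):

package warmcache

import (
	"sync"

	"golang.org/x/sync/errgroup"
)

// highestEpoch runs parse on every file in parallel and keeps the largest result.
// Failures of individual files are skipped, mirroring how the real code logs and continues.
func highestEpoch(files []string, parse func(string) (uint64, error)) (uint64, error) {
	var (
		eg      errgroup.Group
		mu      sync.Mutex
		highest uint64
	)
	for _, f := range files {
		f := f // capture the loop variable for the goroutine (needed before Go 1.22)
		eg.Go(func() error {
			epoch, err := parse(f)
			if err != nil {
				return nil // skip unreadable files instead of failing the whole epoch
			}
			mu.Lock()
			defer mu.Unlock()
			if epoch > highest {
				highest = epoch
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return highest, err
	}
	return highest, nil
}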
@@ -204,6 +204,9 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our light client finality update map.
|
||||
@@ -223,5 +226,8 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
|
||||
@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
}
|
||||
}
|
||||
}
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash block")
|
||||
}
|
||||
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not verify block signature")
|
||||
}
|
||||
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
|
||||
@@ -130,6 +130,10 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
@@ -238,22 +242,14 @@ func (s *Server) handleAttestationsElectra(
},
})

targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
att := singleAtt.ToAttestationElectra(committee)

wantedEpoch := slots.ToEpoch(att.Data.Slot)
// Broadcast first using CommitteeId directly (fast path)
// This matches gRPC behavior and avoids blocking on state fetching
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get head validator indices")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
Index: i,

@@ -264,17 +260,35 @@ func (s *Server) handleAttestationsElectra(
}
continue
}
}

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
// Save to pool after broadcast (slow path - requires state fetching)
// Run in goroutine to avoid blocking the HTTP response
go func() {
for _, singleAtt := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
if err != nil {
log.WithError(err).Error("Could not get target state for attestation")
continue
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
log.WithError(err).Error("Could not get committee for attestation")
continue
}
att := singleAtt.ToAttestationElectra(committee)

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}
}
}
}()

if len(failedBroadcasts) > 0 {
log.WithFields(logrus.Fields{

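A hedged, self-contained sketch of the split described by the "fast path" and "slow path" comments in this change: broadcast immediately using only data already carried by the attestation, then persist to the pool in a background goroutine that is allowed to do the expensive state fetch. The attestation type and the broadcast, fetchCommittee, and save callbacks are placeholders, not Prysm APIs.

package attestflow

// attestation stands in for the real SingleAttestation type.
type attestation struct{}

func handleAttestation(
	att attestation,
	broadcast func(attestation) error,
	fetchCommittee func(attestation) ([]uint64, error),
	save func(attestation, []uint64) error,
) error {
	// Fast path: broadcast without touching beacon state.
	if err := broadcast(att); err != nil {
		return err
	}
	// Slow path: the committee lookup needs a state fetch, so it runs off the
	// request's critical path; failures are only dropped (logged in the real handler).
	go func() {
		committee, err := fetchCommittee(att)
		if err != nil {
			return
		}
		_ = save(att, committee)
	}()
	return nil
}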
@@ -470,6 +484,10 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
var req structs.SubmitSyncCommitteeSignaturesRequest
|
||||
err := json.NewDecoder(r.Body).Decode(&req.Data)
|
||||
switch {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -622,6 +623,8 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
AttestationStateFetcher: chainService,
|
||||
}
|
||||
@@ -654,6 +657,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -673,6 +677,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("phase0 att post electra", func(t *testing.T) {
|
||||
@@ -793,6 +798,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -812,6 +818,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
@@ -861,6 +868,27 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
|
||||
})
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleAtt)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttestationsV2(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListVoluntaryExits(t *testing.T) {
|
||||
@@ -1057,14 +1085,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
|
||||
t.Run("single", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1089,14 +1122,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1120,13 +1158,18 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1149,7 +1192,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
|
||||
})
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
s := &Server{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[]")
|
||||
@@ -1166,7 +1215,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
s := &Server{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -1179,6 +1234,26 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleSyncCommitteeMsg)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitSyncCommitteeSignatures(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListBLSToExecutionChanges(t *testing.T) {
|
||||
|
||||
@@ -40,6 +40,7 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
|
||||
Data: data,
|
||||
})
|
||||
return
|
||||
}
|
||||
previous := schedule[0]
|
||||
for _, entry := range schedule {
|
||||
|
||||
@@ -52,24 +52,27 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
attCopy := att.Copy()
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
@@ -82,6 +85,10 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -98,18 +105,17 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
|
||||
singleAttCopy := singleAtt.Copy()
|
||||
att := singleAttCopy.ToAttestationElectra(committee)
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ func TestProposeAttestation(t *testing.T) {
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
TimeFetcher: chainService,
|
||||
AttestationStateFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
head := util.NewBeaconBlock()
|
||||
head.Block.Slot = 999
|
||||
@@ -141,6 +142,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
@@ -149,6 +151,37 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
func TestProposeAttestation_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
_, err := attesterServer.ProposeAttestation(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestProposeAttestationElectra_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := ðpb.SingleAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestGetAttestationData_OK(t *testing.T) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
|
||||
|
||||
@@ -3,6 +3,7 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -243,8 +244,10 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
var lastRoot [fieldparams.RootLength]byte
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
lastRoot = root
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
@@ -259,6 +262,7 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingRootCount := len(missingIndicesByRoot)
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
@@ -301,11 +305,19 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingRootCount": initialMissingRootCount,
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingRootCount": len(missingIndicesByRoot),
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
})
|
||||
|
||||
if initialMissingRootCount == 1 {
|
||||
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
|
||||
}
|
||||
|
||||
log.Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
@@ -3,9 +3,9 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
@@ -21,13 +21,23 @@
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

var pendingAttsLimit = 32768
const pendingAttsLimit = 32768

// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
type aggregatorIndexFilter int

const (
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
ignoreAggregatorIndex aggregatorIndexFilter = iota
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
includeAggregatorIndex
)

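A hedged, self-contained illustration of the two equality modes introduced above: deduplication of pending aggregates ignores the aggregator index, while the "already saved" check requires it to match as well. The types here are stand-ins for the real ethpb aggregates; equal mirrors the shape of pendingAggregatesAreEqual.

package main

import "fmt"

type filterMode int

const (
	ignoreIndex filterMode = iota
	includeIndex
)

type aggregate struct {
	AggregatorIndex uint64
	Slot            uint64
	Bits            string
}

// equal compares two aggregates; the aggregator index only matters in includeIndex mode.
func equal(a, b aggregate, mode filterMode) bool {
	if mode == includeIndex && a.AggregatorIndex != b.AggregatorIndex {
		return false
	}
	return a.Slot == b.Slot && a.Bits == b.Bits
}

func main() {
	a := aggregate{AggregatorIndex: 1, Slot: 7, Bits: "1111"}
	b := aggregate{AggregatorIndex: 2, Slot: 7, Bits: "1111"}
	fmt.Println(equal(a, b, includeIndex)) // false: the saved-pending check also matches the aggregator
	fmt.Println(equal(a, b, ignoreIndex))  // true: dedup before processing ignores the aggregator index
}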
// This method processes pending attestations once a "known" block has arrived. After validation,
// the valid attestations are saved into the operation mem pool and the invalid ones are deleted.
@@ -50,16 +60,7 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
attestations := s.blkRootToPendingAtts[bRoot]
|
||||
s.pendingAttsLock.RUnlock()
|
||||
|
||||
if len(attestations) > 0 {
|
||||
start := time.Now()
|
||||
s.processAttestations(ctx, attestations)
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
"duration": duration,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
s.processAttestations(ctx, attestations)
|
||||
|
||||
randGen := rand.NewGenerator()
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
@@ -79,26 +80,71 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
// processAttestations processes a list of attestations.
|
||||
// It assumes (for logging purposes only) that all attestations pertain to the same block.
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
|
||||
if len(attestations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
firstAttestation := attestations[0]
|
||||
var blockRoot []byte
|
||||
switch v := firstAttestation.(type) {
|
||||
case ethpb.Att:
|
||||
blockRoot = v.GetData().BeaconBlockRoot
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping processing", v)
|
||||
return
|
||||
}
|
||||
|
||||
validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
|
||||
startAggregate := time.Now()
|
||||
atts := make([]ethpb.Att, 0, len(attestations))
|
||||
aggregateAttAndProofCount := 0
|
||||
for _, att := range attestations {
|
||||
switch v := att.(type) {
|
||||
case ethpb.Att:
|
||||
atts = append(atts, v)
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
s.processAggregate(ctx, v)
|
||||
aggregateAttAndProofCount++
|
||||
// Avoid processing multiple aggregates only differing by aggregator index.
|
||||
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
|
||||
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
|
||||
}) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.processAggregate(ctx, v); err != nil {
|
||||
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
|
||||
continue
|
||||
}
|
||||
|
||||
validAggregates = append(validAggregates, v)
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping", v)
|
||||
}
|
||||
}
|
||||
durationAggregateAttAndProof := time.Since(startAggregate)
|
||||
|
||||
startAtts := time.Now()
|
||||
for _, bucket := range bucketAttestationsByData(atts) {
|
||||
s.processAttestationBucket(ctx, bucket)
|
||||
}
|
||||
|
||||
durationAtts := time.Since(startAtts)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
"totalCount": len(attestations),
|
||||
"aggregateAttAndProofCount": aggregateAttAndProofCount,
|
||||
"uniqueAggregateAttAndProofCount": len(validAggregates),
|
||||
"attCount": len(atts),
|
||||
"durationTotal": durationAggregateAttAndProof + durationAtts,
|
||||
"durationAggregateAttAndProof": durationAggregateAttAndProof,
|
||||
"durationAtts": durationAtts,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
// attestationBucket groups attestations with the same AttestationData for batch processing.
|
||||
@@ -303,21 +349,20 @@ func (s *Service) processVerifiedAttestation(
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
|
||||
res, err := s.validateAggregatedAtt(ctx, aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
return errors.Wrap(err, "validate aggregated att")
|
||||
}
|
||||
|
||||
if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
|
||||
log.Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
return errors.New("Pending aggregated attestation failed validation")
|
||||
}
|
||||
|
||||
att := aggregate.AggregateAttestationAndProof().AggregateVal()
|
||||
if err := s.saveAttestation(att); err != nil {
|
||||
log.WithError(err).Debug("Could not save aggregated attestation")
|
||||
return
|
||||
return errors.Wrap(err, "save attestation")
|
||||
}
|
||||
|
||||
_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
@@ -325,6 +370,8 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
|
||||
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast aggregated attestation")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This defines how pending aggregates are saved in the map. The key is the
|
||||
@@ -336,7 +383,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
|
||||
|
||||
s.savePending(root, agg, func(other any) bool {
|
||||
a, ok := other.(ethpb.SignedAggregateAttAndProof)
|
||||
return ok && pendingAggregatesAreEqual(agg, a)
|
||||
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -391,13 +438,19 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
|
||||
}
|
||||
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
|
||||
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
|
||||
// The filter parameter controls whether aggregator index is considered in the equality check.
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
|
||||
if a.Version() != b.Version() {
|
||||
return false
|
||||
}
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
|
||||
if filter == includeAggregatorIndex {
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
aAtt := a.AggregateAttestationAndProof().AggregateVal()
|
||||
bAtt := b.AggregateAttestationAndProof().AggregateVal()
|
||||
if aAtt.GetData().Slot != bAtt.GetData().Slot {
|
||||
|
||||
@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
// Process block A (which exists and has no pending attestations)
|
||||
// This should skip processing attestations for A and request blocks B and C
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
require.LogsContain(t, hook, "Requesting blocks by root")
|
||||
}
|
||||
|
||||
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
|
||||
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different version", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProofElectra{Message: ðpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different slot", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different committee index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregation bits", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -984,7 +984,30 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1000},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 1,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -44,11 +43,13 @@ func (s *Service) processPendingBlocksQueue() {
if !s.chainIsStarted() {
return
}

locker.Lock()
defer locker.Unlock()

if err := s.processPendingBlocks(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending blocks")
}
locker.Unlock()
})
}

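A minimal sketch of the locking change above, under the assumption that the intent is robustness of the unlock: deferring the unlock right after taking the lock releases it on every exit path, including early returns and panics, instead of relying on reaching a trailing Unlock call.

package queue

import "sync"

// process takes the lock for the duration of work; the deferred unlock runs no
// matter how work returns.
func process(mu *sync.Mutex, work func() error) error {
	mu.Lock()
	defer mu.Unlock()
	return work()
}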
@@ -73,8 +74,10 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
randGen := rand.NewGenerator()
|
||||
var parentRoots [][32]byte
|
||||
|
||||
blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
|
||||
|
||||
// Iterate through sorted slots.
|
||||
for _, slot := range sortedSlots {
|
||||
for i, slot := range sortedSlots {
|
||||
// Skip processing if slot is in the future.
|
||||
if slot > s.cfg.clock.CurrentSlot() {
|
||||
continue
|
||||
@@ -91,6 +94,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
|
||||
// Process each block in the queue.
|
||||
for _, b := range blocksInCache {
|
||||
start := time.Now()
|
||||
totalDuration := time.Duration(0)
|
||||
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -147,19 +153,34 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
}
|
||||
cancelFunction()
|
||||
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
blkRoots = append(blkRoots, blkRoot)
|
||||
|
||||
// Remove the processed block from the queue.
|
||||
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
|
||||
|
||||
duration := time.Since(start)
|
||||
totalDuration += duration
|
||||
log.WithFields(logrus.Fields{
|
||||
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", blkRoot),
|
||||
"duration": duration,
|
||||
"totalDuration": totalDuration,
|
||||
}).Debug("Processed pending block and cleared it in cache")
|
||||
}
|
||||
|
||||
span.End()
|
||||
}
|
||||
|
||||
for _, blkRoot := range blkRoots {
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
}
|
||||
|
||||
return s.sendBatchRootRequest(ctx, parentRoots, randGen)
|
||||
}
|
||||
|
||||
@@ -379,6 +400,19 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
req = roots[:maxReqBlock]
}

if logrus.GetLevel() >= logrus.DebugLevel {
rootsStr := make([]string, 0, len(roots))
for _, req := range roots {
rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
}

log.WithFields(logrus.Fields{
"peer": pid,
"count": len(req),
"roots": rootsStr,
}).Debug("Requesting blocks by root")
}

// Send the request to the peer.
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)

@@ -438,8 +472,6 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
|
||||
roots = append(roots[:i], roots[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
||||
}
|
||||
return roots
|
||||
}
|
||||
|
||||
@@ -3,11 +3,12 @@ package sync
import (
    "context"
    "fmt"
    "math"
    "slices"
    "time"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"

@@ -51,14 +52,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
    // Decode the message, reject if it fails.
    m, err := s.decodePubsubMessage(msg)
    if err != nil {
        log.WithError(err).Error("Failed to decode message")
        return pubsub.ValidationReject, err
    }

    // Reject messages that are not of the expected type.
    dcsc, ok := m.(*eth.DataColumnSidecar)
    if !ok {
        log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
        return pubsub.ValidationReject, errWrongMessage
    }

@@ -145,12 +144,9 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
    }

    // [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
    validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
    if validationResult != pubsub.ValidationAccept {
        return validationResult, err
    if err := verifier.SidecarKzgProofVerified(); err != nil {
        return pubsub.ValidationReject, err
    }
    // Mark KZG verification as satisfied since we did it via batch verifier
    verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)

    // [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
    // with valid header signature, sidecar inclusion proof, and kzg proof.

@@ -194,19 +190,13 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
    dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
    dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))

    peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)

    select {
    case s.dataColumnLogCh <- dataColumnLogEntry{
        Slot: roDataColumn.Slot(),
        ColIdx: roDataColumn.Index,
        PropIdx: roDataColumn.ProposerIndex(),
        BlockRoot: roDataColumn.BlockRoot(),
        ParentRoot: roDataColumn.ParentRoot(),
        PeerSuffix: pid.String()[len(pid.String())-6:],
        PeerGossipScore: peerGossipScore,
        validationTime: validationTime,
        sinceStartTime: sinceSlotStartTime,
        slot: roDataColumn.Slot(),
        index: roDataColumn.Index,
        root: roDataColumn.BlockRoot(),
        validationTime: validationTime,
        sinceStartTime: sinceSlotStartTime,
    }:
    default:
        log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")

@@ -251,68 +241,69 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
}

type dataColumnLogEntry struct {
    Slot primitives.Slot
    ColIdx uint64
    PropIdx primitives.ValidatorIndex
    BlockRoot [32]byte
    ParentRoot [32]byte
    PeerSuffix string
    PeerGossipScore float64
    validationTime time.Duration
    sinceStartTime time.Duration
    slot primitives.Slot
    index uint64
    root [32]byte
    validationTime time.Duration
    sinceStartTime time.Duration
}

func (s *Service) processDataColumnLogs() {
    ticker := time.NewTicker(1 * time.Second)
    defer ticker.Stop()

    slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
    slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)

    for {
        select {
        case entry := <-s.dataColumnLogCh:
            cols := slotStats[entry.Slot]
            cols[entry.ColIdx] = entry
            slotStats[entry.Slot] = cols
        case col := <-s.dataColumnLogCh:
            cols := slotStats[col.root]
            cols = append(cols, col)
            slotStats[col.root] = cols
        case <-ticker.C:
            for slot, columns := range slotStats {
                var (
                    colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
                    peers = make([]string, 0, fieldparams.NumberOfColumns)
                    gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
                    validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
                    sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
                )
            for root, columns := range slotStats {
                indices := make([]uint64, 0, fieldparams.NumberOfColumns)
                minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
                minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)

                totalReceived := 0
                for _, entry := range columns {
                    if entry.PeerSuffix == "" {
                for _, column := range columns {
                    indices = append(indices, column.index)

                    sumValidationTime += column.validationTime
                    sumSinceStartTime += column.sinceStartTime

                    if totalReceived == 0 {
                        minValidationTime, maxValidationTime = column.validationTime, column.validationTime
                        minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
                        totalReceived++
                        continue
                    }
                    colIndices = append(colIndices, entry.ColIdx)
                    peers = append(peers, entry.PeerSuffix)
                    gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
                    validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
                    sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))

                    minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
                    minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
                    totalReceived++
                }

                log.WithFields(logrus.Fields{
                    "slot": slot,
                    "receivedCount": totalReceived,
                    "columnIndices": colIndices,
                    "peers": peers,
                    "gossipScores": gossipScores,
                    "validationTimes": validationTimes,
                    "sinceStartTimes": sinceStartTimes,
                }).Debug("Accepted data column sidecars summary")
                if totalReceived > 0 {
                    slices.Sort(indices)
                    avgValidationTime := sumValidationTime / time.Duration(totalReceived)
                    avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)

                    log.WithFields(logrus.Fields{
                        "slot": columns[0].slot,
                        "root": fmt.Sprintf("%#x", root),
                        "count": totalReceived,
                        "indices": helpers.PrettySlice(indices),
                        "validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
                        "sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
                    }).Debug("Accepted data column sidecars summary")
                }
            }
            slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)

            slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
        }
    }
}

func roundFloat(f float64, decimals int) float64 {
    mult := math.Pow(10, float64(decimals))
    return math.Round(f*mult) / mult
func prettyMinMaxAverage(min, max, average time.Duration) string {
    return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
}

@@ -54,11 +54,13 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    cfg.FuluForkEpoch = 6
    cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
    cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
    cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
    cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
    cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
    cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
    params.OverrideBeaconConfig(cfg)

    secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)

@@ -101,7 +103,10 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    }

    for _, test := range tests {
        for v := 1; v < 6; v++ {
        for v := range version.All() {
            if v == version.Phase0 {
                continue
            }
            t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
                ctx := t.Context()
                p := p2ptest.NewTestP2P(t)

@@ -180,11 +185,13 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    cfg.FuluForkEpoch = 6
    cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
    cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
    cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
    cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
    cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
    cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
    params.OverrideBeaconConfig(cfg)

    secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)

@@ -227,7 +234,10 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    }

    for _, test := range tests {
        for v := 1; v < 6; v++ {
        for v := range version.All() {
            if v == version.Phase0 {
                continue
            }
            t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
                ctx := t.Context()
                p := p2ptest.NewTestP2P(t)

@@ -687,6 +687,12 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
    }}
}

func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
    return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
        return st, nil
    }}
}

func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
    return sbrForValOverrideWithT(nil, idx, val)
}

@@ -11,12 +11,10 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/runtime/logging"
    "github.com/OffchainLabs/prysm/v7/time/slots"

@@ -361,7 +359,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
    }

    if !dv.fc.HasNode(parentRoot) {
        return columnErrBuilder(errSidecarParentNotSeen)
        return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
    }
}

@@ -484,88 +482,19 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e

    defer dv.recordResult(RequireSidecarProposerExpected, &err)

    type slotParentRoot struct {
        slot primitives.Slot
        parentRoot [fieldparams.RootLength]byte
    }

    targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)

    var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
        // Use cached values if available.
        slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
        if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
            return root, nil
        }

        // Compute the epoch of the data column slot.
        dataColumnEpoch := slots.ToEpoch(slot)
        if dataColumnEpoch > 0 {
            dataColumnEpoch = dataColumnEpoch - 1
        }

        // Compute the target root for the epoch.
        targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
        if err != nil {
            return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
        }

        // Store the target root in the cache.
        targetRootBySlotParentRoot[slotParentRoot] = targetRoot

        return targetRoot, nil
    }

    for _, dataColumn := range dv.dataColumns {
        // Extract the slot of the data column.
        dataColumnSlot := dataColumn.Slot()

        // Extract the root of the parent block corresponding to the data column.
        parentRoot := dataColumn.ParentRoot()

        // Compute the target root for the data column.
        targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
        // Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
        verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
        if err != nil {
            return columnErrBuilder(errors.Wrap(err, "target root"))
            return columnErrBuilder(errors.Wrap(err, "verifying state"))
        }

        // Compute the epoch of the data column slot.
        dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
        if dataColumnEpoch > 0 {
            dataColumnEpoch = dataColumnEpoch - 1
        }

        // Create a checkpoint for the target root.
        checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}

        // Try to extract the proposer index from the data column in the cache.
        idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)

        if !cached {
            parentRoot := dataColumn.ParentRoot()
            // Ensure the expensive index computation is only performed once for
            // concurrent requests for the same signature data.
            idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
                verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
                if err != nil {
                    return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
                }

                idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
                if err != nil {
                    return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
                }

                return idx, nil
            })
            if err != nil {
                return err
            }

            var ok bool
            if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
                return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
            }
        // Use proposer lookahead directly
        idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
        if err != nil {
            return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
        }

        if idx != dataColumn.ProposerIndex() {

@@ -626,7 +555,3 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {

    return sha256.Sum256(unhashedKey), nil
}

func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
    return string(root[:]) + fmt.Sprintf("%d", slot)
}

@@ -2,7 +2,6 @@ package verification

import (
    "reflect"
    "sync"
    "testing"
    "time"

@@ -795,87 +794,90 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
        blobCount = 1
    )

    parentRoot := [fieldparams.RootLength]byte{}
    columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
    firstColumn := columns[0]
    ctx := t.Context()
    testCases := []struct {
        name string
        stateByRooter StateByRooter
        proposerCache proposerCache
        columns []blocks.RODataColumn
        error string
    }{
        {
            name: "Cached, matches",
            stateByRooter: nil,
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
            },
            columns: columns,
        },
        {
            name: "Cached, does not match",
            stateByRooter: nil,
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
            },
            columns: columns,
            error: errSidecarUnexpectedProposer.Error(),
        },
        {
            name: "Not cached, state lookup failure",
            stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
            },
            columns: columns,
            error: "verifying state",
        },
    }
    parentRoot := [fieldparams.RootLength]byte{}

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            initializer := Initializer{
                shared: &sharedResources{
                    sr: tc.stateByRooter,
                    pc: tc.proposerCache,
                    hsp: &mockHeadStateProvider{},
                    fc: &mockForkchoicer{
                        TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
                    },
    // Create a Fulu state to get the expected proposer from the lookahead.
    fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
    expectedProposer, err := fuluState.ProposerLookahead()
    require.NoError(t, err)
    expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])

    // Generate data columns with the expected proposer index.
    matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
    // Generate data columns with wrong proposer index.
    wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)

    t.Run("Proposer matches", func(t *testing.T) {
        initializer := Initializer{
            shared: &sharedResources{
                sr: sbrReturnsState(fuluState),
                hsp: &mockHeadStateProvider{
                    headRoot: parentRoot[:],
                    headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
                    headStateReadOnly: fuluState,
                },
            }
                fc: &mockForkchoicer{},
            },
        }

            verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
            var wg sync.WaitGroup
        verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
        err := verifier.SidecarProposerExpected(ctx)
        require.NoError(t, err)
        require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
    })

            var err1, err2 error
            wg.Go(func() {
                err1 = verifier.SidecarProposerExpected(ctx)
            })
            wg.Go(func() {
                err2 = verifier.SidecarProposerExpected(ctx)
            })
            wg.Wait()
    t.Run("Proposer does not match", func(t *testing.T) {
        initializer := Initializer{
            shared: &sharedResources{
                sr: sbrReturnsState(fuluState),
                hsp: &mockHeadStateProvider{
                    headRoot: parentRoot[:],
                    headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
                    headStateReadOnly: fuluState,
                },
                fc: &mockForkchoicer{},
            },
        }

            require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
        err := verifier.SidecarProposerExpected(ctx)
        require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
        require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
    })

            if len(tc.error) > 0 {
                require.ErrorContains(t, tc.error, err1)
                require.ErrorContains(t, tc.error, err2)
                require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
                return
            }
    t.Run("State lookup failure", func(t *testing.T) {
        columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
        initializer := Initializer{
            shared: &sharedResources{
                sr: sbrNotFound(t, columns[0].ParentRoot()),
                hsp: &mockHeadStateProvider{},
                fc: &mockForkchoicer{},
            },
        }

            require.NoError(t, err1)
            require.NoError(t, err2)
            require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
        verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
        err := verifier.SidecarProposerExpected(ctx)
        require.ErrorContains(t, "verifying state", err)
        require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
    })
}

            err := verifier.SidecarProposerExpected(ctx)
            require.NoError(t, err)
        })
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
    roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
    blobs := make([]kzg.Blob, 0, len(roBlobs))
    for i := range roBlobs {
        blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
    }

    cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
    roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
    require.NoError(t, err)

    return roDataColumnSidecars
}

func TestColumnRequirementSatisfaction(t *testing.T) {

@@ -922,12 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
    require.NoError(t, err)
}

func TestConcatRootSlot(t *testing.T) {
    root := [fieldparams.RootLength]byte{1, 2, 3}
    const slot = primitives.Slot(3210)

    const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"

    actual := concatRootSlot(root, slot)
    require.Equal(t, expected, actual)
}

changelog/bastin_fix-lcp2p-bug.md
@@ -0,0 +1,3 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.

changelog/builder-index.md
@@ -0,0 +1,3 @@
### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.

changelog/james-prysm_align-atter-pool-apis.md
@@ -0,0 +1,3 @@
### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API also broadcasts immediately, matching the gRPC behavior.

changelog/kasey_fix-backfill-flag.md
@@ -0,0 +1,2 @@
#### Fixed
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.

changelog/manu-agg.md
@@ -0,0 +1,3 @@
### Changed

- Pending aggregates: when multiple aggregated attestations in the pending queue differ only by the aggregator index, process only one of them.

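A minimal sketch of the deduplication idea in this entry, using hypothetical types and field names rather than the actual Prysm structures: two pending aggregates count as duplicates when everything except the aggregator index matches.

```go
package main

import (
	"bytes"
	"fmt"
)

// pendingAggregate is a simplified stand-in for an aggregated attestation
// sitting in the pending queue (hypothetical shape, for illustration only).
type pendingAggregate struct {
	aggregatorIndex uint64
	slot            uint64
	committeeIndex  uint64
	aggregationBits []byte
}

// equalIgnoringAggregator reports whether a and b differ only by aggregator index.
func equalIgnoringAggregator(a, b pendingAggregate) bool {
	return a.slot == b.slot &&
		a.committeeIndex == b.committeeIndex &&
		bytes.Equal(a.aggregationBits, b.aggregationBits)
}

func main() {
	a := pendingAggregate{aggregatorIndex: 7, slot: 10, committeeIndex: 1, aggregationBits: []byte{0b1111}}
	b := pendingAggregate{aggregatorIndex: 9, slot: 10, committeeIndex: 1, aggregationBits: []byte{0b1111}}
	// Only one of the two needs to be processed.
	fmt.Println(equalIgnoringAggregator(a, b)) // true
}
```
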
changelog/manu-cache-warmup.md
@@ -0,0 +1,3 @@
### Changed

- Data column sidecars cache warmup: process all sidecars for a given epoch in parallel.

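A rough sketch of the parallel warmup pattern described above, with a hypothetical processSidecar helper standing in for the real per-sidecar work.

```go
package main

import (
	"fmt"
	"sync"
)

// processSidecar is a hypothetical per-sidecar warmup step.
func processSidecar(id int) error {
	// ... load the sidecar and populate the cache ...
	return nil
}

func main() {
	sidecarIDs := []int{0, 1, 2, 3, 4, 5, 6, 7} // all sidecars of one epoch

	var wg sync.WaitGroup
	errs := make([]error, len(sidecarIDs))
	for i, id := range sidecarIDs {
		wg.Add(1)
		go func(i, id int) {
			defer wg.Done()
			// Each sidecar of the epoch is processed concurrently.
			errs[i] = processSidecar(id)
		}(i, id)
	}
	wg.Wait()

	for _, err := range errs {
		if err != nil {
			fmt.Println("warmup error:", err)
		}
	}
}
```
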
changelog/manu-log.md
@@ -0,0 +1,3 @@
### Changed

- Summarize the DEBUG log for data column sidecars received via gossip.

changelog/manu-remove-error-logs.md
@@ -0,0 +1,2 @@
### Changed
- `validateDataColumn`: Remove error logs.

changelog/potuz_dcs_pc_removal.md
@@ -0,0 +1,2 @@
### Changed
- Use lookahead to validate data column sidecar proposer index.

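A simplified sketch of the lookahead check (the actual code path uses the verifying state and helpers.BeaconProposerIndexAtSlot, as shown in the verification diff above); the lookahead table and index arithmetic here are illustrative only.

```go
package main

import "fmt"

// expectedProposer returns the proposer index for a slot from a lookahead
// table indexed by slot-within-lookahead (hypothetical, illustration only).
func expectedProposer(lookahead []uint64, slotInLookahead int) uint64 {
	return lookahead[slotInLookahead]
}

func main() {
	lookahead := []uint64{14, 3, 27, 8} // hypothetical proposer indices
	sidecarProposer := uint64(27)
	slotInLookahead := 2

	// The sidecar's proposer index is compared directly against the lookahead,
	// so no separate proposer cache lookup is needed.
	if expectedProposer(lookahead, slotInLookahead) != sidecarProposer {
		fmt.Println("reject: unexpected proposer")
		return
	}
	fmt.Println("proposer matches lookahead")
}
```
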
changelog/potuz_dont_lock_fcu.md
@@ -0,0 +1,3 @@
### Changed

- Notify the engine about forkchoice updates in the background.

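A minimal sketch of moving the engine notification off the critical path; notifyForkchoiceUpdate is a hypothetical stand-in for the engine API call.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// notifyForkchoiceUpdate stands in for the engine forkchoice-updated call (hypothetical).
func notifyForkchoiceUpdate(ctx context.Context, headRoot [32]byte) error {
	_ = ctx
	_ = headRoot
	return nil
}

func main() {
	headRoot := [32]byte{'a'}

	// Fire-and-forget: block processing is not blocked on the engine response.
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
		defer cancel()
		if err := notifyForkchoiceUpdate(ctx, headRoot); err != nil {
			fmt.Println("forkchoice update failed:", err)
		}
	}()

	time.Sleep(10 * time.Millisecond) // give the goroutine a chance to run in this toy example
}
```
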
changelog/potuz_fcu_ctx.md
@@ -0,0 +1,2 @@
### Changed
- Use a separate context when updating the slot cache.

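A sketch of the separate-context pattern, assuming the goal is to keep the cache update alive even if the caller's context is cancelled; updateSlotCache is hypothetical, and the detachment uses the standard library's context.WithoutCancel.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// updateSlotCache stands in for the cache update (hypothetical).
func updateSlotCache(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(5 * time.Millisecond):
		return nil
	}
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	cancel() // the request context may already be cancelled

	// Detach cancellation so the cache update is not aborted with the request,
	// while still bounding it with its own timeout.
	cacheCtx, cacheCancel := context.WithTimeout(context.WithoutCancel(reqCtx), time.Second)
	defer cacheCancel()

	fmt.Println(updateSlotCache(cacheCtx)) // <nil>
}
```
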
changelog/potuz_next_epoch_attributes.md
@@ -0,0 +1,3 @@
### Fixed

- Do not process slots or copy states for next-epoch proposers after Fulu.

changelog/potuz_no_fcu_on_batches.md
@@ -0,0 +1,2 @@
### Ignored
- Do not send FCU on block batches.

changelog/potuz_remove_signature_check.md
@@ -0,0 +1,3 @@
### Changed

- Do not check block signature on state transition.

changelog/potuz_use_head_previous_epoch.md
@@ -0,0 +1,3 @@
### Added

- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint.

changelog/radek_extend-http-analyzer.md
@@ -0,0 +1,3 @@
### Changed

- Extend `httperror` analyzer to more functions.

changelog/sashass1315_fix-panic.md
@@ -0,0 +1,3 @@
### Fixed

- Avoid a panic when the fork schedule is empty. [#16175](https://github.com/OffchainLabs/prysm/pull/16175)

changelog/satushh-consolidation.md
@@ -0,0 +1,3 @@
### Changed

- Performance improvement in ProcessConsolidationRequests: use the more performant HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw, since there is no need to calculate the full total pending balance.

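A toy illustration of why an existence check is cheaper than summing the full pending balance; the types below are hypothetical stand-ins for the beacon state's pending withdrawal entries.

```go
package main

import "fmt"

type pendingWithdrawal struct {
	validatorIndex uint64
	amount         uint64
}

// pendingBalanceToWithdraw sums every matching entry, even when the caller
// only needs a yes/no answer.
func pendingBalanceToWithdraw(ws []pendingWithdrawal, idx uint64) uint64 {
	var total uint64
	for _, w := range ws {
		if w.validatorIndex == idx {
			total += w.amount
		}
	}
	return total
}

// hasPendingBalanceToWithdraw can return as soon as the first match is found.
func hasPendingBalanceToWithdraw(ws []pendingWithdrawal, idx uint64) bool {
	for _, w := range ws {
		if w.validatorIndex == idx && w.amount > 0 {
			return true
		}
	}
	return false
}

func main() {
	ws := []pendingWithdrawal{{validatorIndex: 1, amount: 32}, {validatorIndex: 2, amount: 1}}
	fmt.Println(pendingBalanceToWithdraw(ws, 1) > 0, hasPendingBalanceToWithdraw(ws, 1))
}
```
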
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "basis_points.go",
        "builder_index.go",
        "committee_bits_mainnet.go",
        "committee_bits_minimal.go", # keep
        "committee_index.go",

@@ -31,6 +32,7 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "builder_index_test.go",
        "committee_index_test.go",
        "domain_test.go",
        "epoch_test.go",

consensus-types/primitives/builder_index.go
@@ -0,0 +1,54 @@
package primitives

import (
    "fmt"

    fssz "github.com/prysmaticlabs/fastssz"
)

var _ fssz.HashRoot = (BuilderIndex)(0)
var _ fssz.Marshaler = (*BuilderIndex)(nil)
var _ fssz.Unmarshaler = (*BuilderIndex)(nil)

// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64

// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
    return fssz.HashWithDefaultHasher(b)
}

// HashTreeRootWith appends the SSZ uint64 representation of the index to the given hasher.
func (b BuilderIndex) HashTreeRootWith(hh *fssz.Hasher) error {
    hh.PutUint64(uint64(b))
    return nil
}

// UnmarshalSSZ decodes the SSZ-encoded uint64 index from buf.
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
    if len(buf) != b.SizeSSZ() {
        return fmt.Errorf("expected buffer of length %d received %d", b.SizeSSZ(), len(buf))
    }
    *b = BuilderIndex(fssz.UnmarshallUint64(buf))
    return nil
}

// MarshalSSZTo appends the SSZ-encoded index to dst and returns the extended buffer.
func (b *BuilderIndex) MarshalSSZTo(dst []byte) ([]byte, error) {
    marshalled, err := b.MarshalSSZ()
    if err != nil {
        return nil, err
    }
    return append(dst, marshalled...), nil
}

// MarshalSSZ encodes the index as an SSZ uint64.
func (b *BuilderIndex) MarshalSSZ() ([]byte, error) {
    marshalled := fssz.MarshalUint64([]byte{}, uint64(*b))
    return marshalled, nil
}

// SizeSSZ returns the size of the SSZ-encoded index in bytes.
func (b *BuilderIndex) SizeSSZ() int {
    return 8
}

consensus-types/primitives/builder_index_test.go
@@ -0,0 +1,86 @@
package primitives_test

import (
    "encoding/binary"
    "slices"
    "strconv"
    "testing"

    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderIndex_SSZRoundTripAndHashRoot(t *testing.T) {
    cases := []uint64{
        0,
        1,
        42,
        (1 << 32) - 1,
        1 << 32,
        ^uint64(0),
    }

    for _, v := range cases {
        t.Run("v="+u64name(v), func(t *testing.T) {
            t.Parallel()

            val := primitives.BuilderIndex(v)
            require.Equal(t, 8, (&val).SizeSSZ())

            enc, err := (&val).MarshalSSZ()
            require.NoError(t, err)
            require.Equal(t, 8, len(enc))

            wantEnc := make([]byte, 8)
            binary.LittleEndian.PutUint64(wantEnc, v)
            require.DeepEqual(t, wantEnc, enc)

            dstPrefix := []byte("prefix:")
            dst, err := (&val).MarshalSSZTo(slices.Clone(dstPrefix))
            require.NoError(t, err)
            wantDst := append(dstPrefix, wantEnc...)
            require.DeepEqual(t, wantDst, dst)

            var decoded primitives.BuilderIndex
            require.NoError(t, (&decoded).UnmarshalSSZ(enc))
            require.Equal(t, val, decoded)

            root, err := val.HashTreeRoot()
            require.NoError(t, err)

            var wantRoot [32]byte
            binary.LittleEndian.PutUint64(wantRoot[:8], v)
            require.Equal(t, wantRoot, root)
        })
    }
}

func TestBuilderIndex_UnmarshalSSZRejectsWrongSize(t *testing.T) {
    for _, size := range []int{7, 9} {
        t.Run("size="+strconv.Itoa(size), func(t *testing.T) {
            t.Parallel()
            var v primitives.BuilderIndex
            err := (&v).UnmarshalSSZ(make([]byte, size))
            require.ErrorContains(t, "expected buffer of length 8", err)
        })
    }
}

func u64name(v uint64) string {
    switch v {
    case 0:
        return "0"
    case 1:
        return "1"
    case 42:
        return "42"
    case (1 << 32) - 1:
        return "2^32-1"
    case 1 << 32:
        return "2^32"
    case ^uint64(0):
        return "max"
    default:
        return "custom"
    }
}

@@ -43,6 +43,7 @@ func WriteJson(w http.ResponseWriter, v any) {
func WriteSsz(w http.ResponseWriter, respSsz []byte) {
    w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
    w.Header().Set("Content-Type", api.OctetStreamMediaType)
    w.WriteHeader(http.StatusOK)
    if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
        log.WithError(err).Error("Could not write response message")
    }

@@ -44,6 +44,13 @@ func WithProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, valRoo
    }
}

// WithProposer sets the proposer index for the generated block without signing.
func WithProposer(idx primitives.ValidatorIndex) DenebBlockGeneratorOption {
    return func(g *denebBlockGenerator) {
        g.proposer = idx
    }
}

func WithPayloadSetter(p *enginev1.ExecutionPayloadDeneb) DenebBlockGeneratorOption {
    return func(g *denebBlockGenerator) {
        g.payload = p

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
    name = "go_default_library",
    srcs = ["analyzer.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httperror",
    importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httpwriter",
    visibility = ["//visibility:public"],
    deps = [
        "@org_golang_x_tools//go/analysis:go_default_library",

@@ -1,4 +1,4 @@
package httperror
package httpwriter

import (
    "go/ast"

@@ -9,8 +9,8 @@ import (
)

var Analyzer = &analysis.Analyzer{
    Name: "httperror",
    Doc: "Ensures calls to httputil.HandleError are immediately followed by a return statement.",
    Name: "httpwriter",
    Doc: "Ensures that httputil functions which make use of the writer are immediately followed by a return statement.",
    Requires: []*analysis.Analyzer{
        inspect.Analyzer,
    },

@@ -99,7 +99,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex

        // Now check the current statement itself: is it (or does it contain) a direct call to httputil.HandleError?
        // We only consider ExprStmt that are direct CallExpr to httputil.HandleError.
        call := findHandleErrorCall(stmt)
        call, name := findHandleErrorCall(stmt)
        if call == nil {
            continue
        }

@@ -121,7 +121,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
            continue
        }
        // otherwise it's not a return (even if it's an if/for etc) -> violation
        pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be immediately followed by a return statement")
        pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
        continue
    }

@@ -133,31 +133,33 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
        }

        // Non-void function and it's the last statement → violation
        pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be followed by return because function has return values")
        pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
    }
}

// findHandleErrorCall returns the call expression if stmt is a direct call to httputil.HandleError(...),
// otherwise nil. We only match direct ExprStmt -> CallExpr -> SelectorExpr where selector is httputil.HandleError.
func findHandleErrorCall(stmt ast.Stmt) *ast.CallExpr {
func findHandleErrorCall(stmt ast.Stmt) (*ast.CallExpr, string) {
    es, ok := stmt.(*ast.ExprStmt)
    if !ok {
        return nil
        return nil, ""
    }
    call, ok := es.X.(*ast.CallExpr)
    if !ok {
        return nil
        return nil, ""
    }
    sel, ok := call.Fun.(*ast.SelectorExpr)
    if !ok {
        return nil
        return nil, ""
    }
    pkgIdent, ok := sel.X.(*ast.Ident)
    if !ok {
        return nil
        return nil, ""
    }
    if pkgIdent.Name == "httputil" && sel.Sel.Name == "HandleError" {
        return call
    selectorName := sel.Sel.Name
    if pkgIdent.Name == "httputil" &&
        (selectorName == "HandleError" || selectorName == "WriteError" || selectorName == "WriteJson" || selectorName == "WriteSSZ") {
        return call, selectorName
    }
    return nil
    return nil, ""
}

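For reference, a hedged sketch of the pattern the renamed analyzer enforces: an httputil call that writes to the response must be immediately followed by a return. The httputil import path and the HandleError signature below are assumptions for illustration, not taken from this diff.

```go
package handlers

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v7/network/httputil" // assumed import path
)

// Flagged: execution continues after the error response has been written.
func badHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		httputil.HandleError(w, "only GET is supported", http.StatusMethodNotAllowed)
		// analyzer report: call must be immediately followed by a return statement
	}
	w.WriteHeader(http.StatusOK)
}

// OK: the write is immediately followed by a return.
func goodHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		httputil.HandleError(w, "only GET is supported", http.StatusMethodNotAllowed)
		return
	}
	w.WriteHeader(http.StatusOK)
}
```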