diff --git a/beacon-chain/blockchain/execution_engine_test.go b/beacon-chain/blockchain/execution_engine_test.go
index e7eda0532c..3df1118463 100644
--- a/beacon-chain/blockchain/execution_engine_test.go
+++ b/beacon-chain/blockchain/execution_engine_test.go
@@ -1053,3 +1053,40 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
 	require.Equal(t, vhs[0].String(), vh0)
 	require.Equal(t, vhs[1].String(), vh1)
 }
+
+func TestComputePayloadAttribute(t *testing.T) {
+	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
+	ctx := tr.ctx
+
+	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
+
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
+	// Cache hit, advance state, no fee recipient
+	slot := primitives.Slot(1)
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	blk := util.NewBeaconBlockBellatrix()
+	signed, err := consensusblocks.NewSignedBeaconBlock(blk)
+	require.NoError(t, err)
+	roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
+	require.NoError(t, err)
+	cfg := &postBlockProcessConfig{
+		ctx:     ctx,
+		roblock: roblock,
+	}
+	fcu := &fcuConfig{
+		headState:     st,
+		proposingSlot: slot,
+		headRoot:      [32]byte{},
+	}
+	require.NoError(t, service.computePayloadAttributes(cfg, fcu))
+	require.Equal(t, false, fcu.attributes.IsEmpty())
+	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
+
+	// Cache hit, advance state, has fee recipient
+	suggestedAddr := common.HexToAddress("123")
+	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
+	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	require.NoError(t, service.computePayloadAttributes(cfg, fcu))
+	require.Equal(t, false, fcu.attributes.IsEmpty())
+	require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
+}
diff --git a/beacon-chain/blockchain/forkchoice_update_execution.go b/beacon-chain/blockchain/forkchoice_update_execution.go
index 57b07019b8..79d9132e74 100644
--- a/beacon-chain/blockchain/forkchoice_update_execution.go
+++ b/beacon-chain/blockchain/forkchoice_update_execution.go
@@ -12,7 +12,6 @@ import (
 	payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
-	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -54,53 +53,58 @@ type fcuConfig struct {
 }
 
 // sendFCU handles the logic to notify the engine of a forckhoice update
-// when processing an incoming block during regular sync. It
-// always updates the shuffling caches and handles epoch transitions .
-func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
-	if cfg.postState.Version() < version.Fulu {
-		// update the caches to compute the right proposer index
-		// this function is called under a forkchoice lock which we need to release.
-		s.ForkChoicer().Unlock()
-		s.updateCachesPostBlockProcessing(cfg)
-		s.ForkChoicer().Lock()
+// for the first time when processing an incoming block during regular sync. It
+// always updates the shuffling caches and handles epoch transitions when the
+// incoming block is late, preparing payload attributes in that case, while it
+// only sends a message with empty attributes for early blocks.
+func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
+	if !s.isNewHead(cfg.headRoot) {
+		return nil
 	}
-	if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
-		log.WithError(err).Error("Could not get forkchoice update argument")
-		return
-	}
-	// If head has not been updated and attributes are nil, we can skip the FCU.
-	if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
-		return
-	}
-	// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
 	if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
+		return nil
+	}
+	return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
+}
+
+// sendFCUWithAttributes computes the payload attributes and sends an FCU message
+// to the engine if needed.
+func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
+	slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
+	defer cancel()
+	cfg.ctx = slotCtx
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
+		log.WithError(err).Error("Could not compute payload attributes")
 		return
 	}
-	if s.inRegularSync() {
-		go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
+	if fcuArgs.attributes.IsEmpty() {
+		return
 	}
-
-	if s.isNewHead(fcuArgs.headRoot) {
-		if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
-			log.WithError(err).Error("Could not save head")
-		}
-		s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
+	if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
+		log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
 	}
 }
 
-// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
-// The caller of this function should NOT have a lock in forkchoice store.
-func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
+// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
+func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
 	_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
 	defer span.End()
 	// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
 	ctx = trace.NewContext(s.ctx, span)
-	s.ForkChoicer().Lock()
-	defer s.ForkChoicer().Unlock()
 	_, err := s.notifyForkchoiceUpdate(ctx, args)
 	if err != nil {
-		log.WithError(err).Error("Could not notify forkchoice update")
+		return errors.Wrap(err, "could not notify forkchoice update")
 	}
+
+	if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
+		log.WithError(err).Error("Could not save head")
+	}
+
+	// Only need to prune attestations from pool if the head has changed.
+	s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
+	return nil
 }
 
 // shouldOverrideFCU checks whether the incoming block is still subject to being
diff --git a/beacon-chain/blockchain/forkchoice_update_execution_test.go b/beacon-chain/blockchain/forkchoice_update_execution_test.go
index b08d307157..2f7746147d 100644
--- a/beacon-chain/blockchain/forkchoice_update_execution_test.go
+++ b/beacon-chain/blockchain/forkchoice_update_execution_test.go
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
 		headBlock:     wsb,
 		proposingSlot: service.CurrentSlot() + 1,
 	}
-	service.forkchoiceUpdateWithExecution(ctx, args)
+	require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
 	payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
 	require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
 		headRoot:      r,
 		proposingSlot: service.CurrentSlot() + 1,
 	}
-	service.forkchoiceUpdateWithExecution(ctx, args)
+	require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
 }
 
 func TestShouldOverrideFCU(t *testing.T) {
diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go
index 94859ac837..f3297312d9 100644
--- a/beacon-chain/blockchain/process_block.go
+++ b/beacon-chain/blockchain/process_block.go
@@ -66,6 +66,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	startTime := time.Now()
 	fcuArgs := &fcuConfig{}
+	if s.inRegularSync() {
+		defer s.handleSecondFCUCall(cfg, fcuArgs)
+	}
 	if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
 		defer s.processLightClientUpdates(cfg)
 	}
@@ -102,12 +105,14 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 		s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
 		return nil
 	}
-	s.sendFCU(cfg, fcuArgs)
-
-	// Pre-Fulu the caches are updated when computing the payload attributes
-	if cfg.postState.Version() >= version.Fulu {
-		go s.updateCachesPostBlockProcessing(cfg)
+	if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
+		log.WithError(err).Error("Could not get forkchoice update argument")
+		return nil
 	}
+	if err := s.sendFCU(cfg, fcuArgs); err != nil {
+		return errors.Wrap(err, "could not send FCU to engine")
+	}
+
 	return nil
 }
 
@@ -317,7 +322,6 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
 	return nil
 }
 
-// the caller of this function must not hold a lock in forkchoice store.
 func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
 	e := coreTime.CurrentEpoch(st)
 	if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -347,9 +351,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 	if e > 0 {
 		e = e - 1
 	}
-	s.ForkChoicer().RLock()
 	target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
-	s.ForkChoicer().RUnlock()
 	if err != nil {
 		log.WithError(err).Error("Could not update proposer index state-root map")
 		return nil
 	}
@@ -362,7 +364,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 }
 
 // Epoch boundary tasks: it copies the headState and updates the epoch boundary
-// caches. The caller of this function must not hold a lock in forkchoice store.
+// caches.
 func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
 	defer span.End()
@@ -902,6 +904,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if currentSlot == s.HeadSlot() {
 		return
 	}
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
 	// return early if we are in init sync
 	if !s.inRegularSync() {
 		return
 	}
@@ -914,30 +918,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if lastState == nil {
 		lastRoot, lastState = headRoot[:], headState
 	}
-	// Before Fulu we need to process the next slot to find out if we are proposing.
-	if lastState.Version() < version.Fulu {
-		// Copy all the field tries in our cached state in the event of late
-		// blocks.
-		lastState.CopyAllTries()
-		if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
-			log.WithError(err).Debug("Could not update next slot state cache")
-		}
-		if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
-			log.WithError(err).Error("Could not update epoch boundary caches")
-		}
-	} else {
-		// After Fulu, we can update the caches asynchronously after sending FCU to the engine
-		defer func() {
-			go func() {
-				lastState.CopyAllTries()
-				if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
-					log.WithError(err).Debug("Could not update next slot state cache")
-				}
-				if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
-					log.WithError(err).Error("Could not update epoch boundary caches")
-				}
-			}()
-		}()
+	// Copy all the field tries in our cached state in the event of late
+	// blocks.
+	lastState.CopyAllTries()
+	if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
+		log.WithError(err).Debug("Could not update next slot state cache")
+	}
+	if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
+		log.WithError(err).Error("Could not update epoch boundary caches")
 	}
 	// return early if we already started building a block for the current
 	// head root
@@ -967,8 +955,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 		headBlock:  headBlock,
 		attributes: attribute,
 	}
-	s.cfg.ForkChoiceStore.Lock()
-	defer s.cfg.ForkChoiceStore.Unlock()
 	_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
 	if err != nil {
 		log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
diff --git a/beacon-chain/blockchain/process_block_helpers.go b/beacon-chain/blockchain/process_block_helpers.go
index 3f60e78e3f..df7790752a 100644
--- a/beacon-chain/blockchain/process_block_helpers.go
+++ b/beacon-chain/blockchain/process_block_helpers.go
@@ -42,8 +42,14 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
 	if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
 		return err
 	}
-	fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
-	return nil
+	if !s.inRegularSync() {
+		return nil
+	}
+	slot := cfg.roblock.Block().Slot()
+	if slots.WithinVotingWindow(s.genesisTime, slot) {
+		return nil
+	}
+	return s.computePayloadAttributes(cfg, fcuArgs)
 }
 
 func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -167,19 +173,26 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
 
 // updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
 // boundary in order to compute the right proposer indices after processing
-// state transition. The caller of this function must not hold a lock in forkchoice store.
-func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
+// state transition. This function is called on late blocks while the forkchoice
+// lock is still held, before sending FCU to the engine.
+func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
 	slot := cfg.postState.Slot()
 	root := cfg.roblock.Root()
 	if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
-		log.WithError(err).Error("Could not update next slot state cache")
-		return
+		return errors.Wrap(err, "could not update next slot state cache")
 	}
 	if !slots.IsEpochEnd(slot) {
-		return
+		return nil
 	}
-	if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
-		log.WithError(err).Error("Could not handle epoch boundary")
+	return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
+}
+
+// handleSecondFCUCall handles a second call to FCU when syncing a new block.
+// This is useful when proposing the next block and we want to defer the
+// computation of the next slot shuffling.
+func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
+	if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
+		go s.sendFCUWithAttributes(cfg, fcuArgs)
 	}
 }
 
@@ -189,6 +202,20 @@ func reportProcessingTime(startTime time.Time) {
 	onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
 }
 
+// computePayloadAttributes modifies the passed FCU arguments to
+// contain the right payload attributes for the tracked proposer. It gets
+// called on blocks that arrive after the attestation voting window, or in a
+// background routine after syncing early blocks.
+func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
+	if cfg.roblock.Root() == cfg.headRoot {
+		if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
+			return err
+		}
+	}
+	fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
+	return nil
+}
+
 // getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
 // to retrieve the state in DB. It verifies the pre state's validity and the incoming block
 // is in the correct time window.
diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index 7ef16025f9..3379266d3d 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -738,9 +738,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -790,9 +788,7 @@ func TestOnBlock_CanFinalize(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -820,9 +816,7 @@ func TestOnBlock_NilBlock(t *testing.T) { service, tr := minimalTestService(t) signed := &consensusblocks.SignedBeaconBlock{} roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed} - service.cfg.ForkChoiceStore.Lock() err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true}) - service.cfg.ForkChoiceStore.Unlock() require.Equal(t, true, IsInvalidBlock(err)) } @@ -854,9 +848,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() testState, err = service.cfg.StateGen.StateByRoot(ctx, r) require.NoError(t, err) } @@ -1329,9 +1321,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1343,9 +1333,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1357,9 +1345,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, 
[32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1371,9 +1357,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1398,6 +1382,197 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) { require.Equal(t, true, IsInvalidBlock(err)) } +// See the description in #10777 and #10782 for the full setup +// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch +// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns +// INVALID from FCU, with LVH block 17. No head is viable. We check +// that the node is optimistic and that we can actually import a block on top of +// 17 and recover. +func TestStore_NoViableHead_FCU(t *testing.T) { + params.SetupTestConfigCleanup(t) + config := params.BeaconConfig() + config.SlotsPerEpoch = 6 + config.AltairForkEpoch = 1 + config.BellatrixForkEpoch = 2 + params.OverrideBeaconConfig(config) + + mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus} + service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine)) + ctx := tr.ctx + + st, keys := util.DeterministicGenesisState(t, 64) + stateRoot, err := st.HashTreeRoot(ctx) + require.NoError(t, err, "Could not hash genesis state") + + require.NoError(t, service.saveGenesisData(ctx, st)) + + genesis := blocks.NewGenesisBlock(stateRoot[:]) + wsb, err := consensusblocks.NewSignedBeaconBlock(genesis) + require.NoError(t, err) + require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block") + + parentRoot, err := genesis.Block.HashTreeRoot() + require.NoError(t, err, "Could not get signing root") + require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state") + require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state") + + for i := 1; i < 6; i++ { + driftGenesisTime(service, primitives.Slot(i), 0) + st, err := service.HeadState(ctx) + require.NoError(t, err) + b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i)) + require.NoError(t, err) + wsb, err := consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + root, err := b.Block.HashTreeRoot() + require.NoError(t, err) + + preState, err := service.getBlockPreState(ctx, wsb.Block()) + require.NoError(t, err) + postState, err := service.validateStateTransition(ctx, preState, wsb) + require.NoError(t, err) + require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) + roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) + require.NoError(t, err) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + } + + for i := 6; i < 12; i++ { + driftGenesisTime(service, primitives.Slot(i), 0) + st, err := service.HeadState(ctx) + require.NoError(t, err) + b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i)) + require.NoError(t, err) + wsb, err := consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + root, err := 
b.Block.HashTreeRoot() + require.NoError(t, err) + preState, err := service.getBlockPreState(ctx, wsb.Block()) + require.NoError(t, err) + postState, err := service.validateStateTransition(ctx, preState, wsb) + require.NoError(t, err) + require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) + roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) + require.NoError(t, err) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) + } + + for i := 12; i < 18; i++ { + driftGenesisTime(service, primitives.Slot(i), 0) + st, err := service.HeadState(ctx) + require.NoError(t, err) + b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i)) + require.NoError(t, err) + wsb, err := consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + root, err := b.Block.HashTreeRoot() + require.NoError(t, err) + preState, err := service.getBlockPreState(ctx, wsb.Block()) + require.NoError(t, err) + postState, err := service.validateStateTransition(ctx, preState, wsb) + require.NoError(t, err) + require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) + roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) + require.NoError(t, err) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) + } + // Check that we haven't justified the second epoch yet + jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint() + require.Equal(t, primitives.Epoch(0), jc.Epoch) + + // import a block that justifies the second epoch + driftGenesisTime(service, 18, 0) + validHeadState, err := service.HeadState(ctx) + require.NoError(t, err) + b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18) + require.NoError(t, err) + wsb, err = consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + firstInvalidRoot, err := b.Block.HashTreeRoot() + require.NoError(t, err) + preState, err := service.getBlockPreState(ctx, wsb.Block()) + require.NoError(t, err) + postState, err := service.validateStateTransition(ctx, preState, wsb) + require.NoError(t, err) + require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState)) + roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot) + require.NoError(t, err) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) + jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint() + require.Equal(t, primitives.Epoch(2), jc.Epoch) + + sjc := validHeadState.CurrentJustifiedCheckpoint() + require.Equal(t, primitives.Epoch(0), sjc.Epoch) + lvh := b.Block.Body.ExecutionPayload.ParentHash + // check our head + require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) + + // import another block to find out that it was invalid + mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh} + service.cfg.ExecutionEngineCaller = mockEngine + driftGenesisTime(service, 19, 0) + st, err = service.HeadState(ctx) + require.NoError(t, err) + b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19) + require.NoError(t, err) + wsb, err = consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + root, err := b.Block.HashTreeRoot() + require.NoError(t, 
err) + preState, err = service.getBlockPreState(ctx, wsb.Block()) + require.NoError(t, err) + postState, err = service.validateStateTransition(ctx, preState, wsb) + require.NoError(t, err) + require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) + roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) + require.NoError(t, err) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.ErrorContains(t, "received an INVALID payload from execution engine", err) + // Check that forkchoice's head is the last invalid block imported. The + // store's headroot is the previous head (since the invalid block did + // not finish importing) one and that the node is optimistic + require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) + headRoot, err := service.HeadRoot(ctx) + require.NoError(t, err) + require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot)) + optimistic, err := service.IsOptimistic(ctx) + require.NoError(t, err) + require.Equal(t, true, optimistic) + + // import another block based on the last valid head state + mockEngine = &mockExecution.EngineClient{} + service.cfg.ExecutionEngineCaller = mockEngine + driftGenesisTime(service, 20, 0) + b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20) + require.NoError(t, err) + wsb, err = consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + root, err = b.Block.HashTreeRoot() + require.NoError(t, err) + + preState, err = service.getBlockPreState(ctx, wsb.Block()) + require.NoError(t, err) + postState, err = service.validateStateTransition(ctx, preState, wsb) + require.NoError(t, err) + require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) + roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) + require.NoError(t, err) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) + require.NoError(t, err) + // Check the newly imported block is head, it justified the right + // checkpoint and the node is no longer optimistic + require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) + sjc = service.CurrentJustifiedCheckpt() + require.Equal(t, jc.Epoch, sjc.Epoch) + require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root)) + optimistic, err = service.IsOptimistic(ctx) + require.NoError(t, err) + require.Equal(t, false, optimistic) +} + // See the description in #10777 and #10782 for the full setup // We sync optimistically a chain of blocks. Block 17 is the last block in Epoch // 2. 
Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns @@ -1449,9 +1624,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() } for i := 6; i < 12; i++ { @@ -1471,9 +1644,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() - require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) } for i := 12; i < 18; i++ { @@ -1494,9 +1666,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() - require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) } // Check that we haven't justified the second epoch yet jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint() @@ -1519,9 +1690,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint() require.Equal(t, primitives.Epoch(2), jc.Epoch) @@ -1531,10 +1700,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { lvh := b.Block.Body.ExecutionPayload.ParentHash // check our head require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) - isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot) - require.NoError(t, err) - require.Equal(t, true, isBlock18OptimisticAfterImport) - time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed // import another block to find out that it was invalid mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh} @@ -1585,9 +1750,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) // Check the newly imported block is head, it justified the right // checkpoint and the node is no longer optimistic @@ 
-1654,9 +1817,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() } for i := 6; i < 12; i++ { @@ -1677,9 +1838,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() - require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) } // import the merge block @@ -1699,9 +1859,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) // save the post state and the payload Hash of this block since it will // be the LVH @@ -1730,9 +1888,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13]) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() - require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) } // Check that we have justified the second epoch jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint() @@ -1800,9 +1957,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) - service.cfg.ForkChoiceStore.Unlock() // Check that the head is still INVALID and the node is still optimistic require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) optimistic, err = service.IsOptimistic(ctx) @@ -1827,9 +1982,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) st, err = service.cfg.StateGen.StateByRoot(ctx, root) require.NoError(t, err) @@ -1857,9 +2010,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err = 
consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) sjc = service.CurrentJustifiedCheckpt() @@ -1903,6 +2054,7 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state") for i := 1; i < 6; i++ { + t.Log(i) driftGenesisTime(service, primitives.Slot(i), 0) st, err := service.HeadState(ctx) require.NoError(t, err) @@ -1919,9 +2071,7 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() } for i := 6; i < 12; i++ { @@ -1941,9 +2091,8 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() - require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + require.NoError(t, err) } // import the merge block @@ -1963,9 +2112,7 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) // save the post state and the payload Hash of this block since it will // be the LVH @@ -1996,9 +2143,7 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -2119,9 +2264,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() st, err = service.HeadState(ctx) require.NoError(t, err) @@ -2187,9 +2330,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := 
consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() st, err = service.HeadState(ctx) require.NoError(t, err) @@ -2472,10 +2613,7 @@ func TestRollbackBlock(t *testing.T) { require.NoError(t, err) // Rollback block insertion into db and caches. - service.cfg.ForkChoiceStore.Lock() - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - service.cfg.ForkChoiceStore.Unlock() - require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err) + require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) // The block should no longer exist. require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root)) @@ -2576,9 +2714,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - service.cfg.ForkChoiceStore.Unlock() b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34) require.NoError(t, err) @@ -2612,10 +2748,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) { require.NoError(t, postState.SetFinalizedCheckpoint(cj)) // Rollback block insertion into db and caches. - service.cfg.ForkChoiceStore.Lock() - err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}) - service.cfg.ForkChoiceStore.Unlock() - require.ErrorContains(t, "context canceled", err) + require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})) // The block should no longer exist. 
 	require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -3111,9 +3244,7 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
 			}
 
 			// Execute postBlockProcess
-			service.cfg.ForkChoiceStore.Lock()
 			err = service.postBlockProcess(cfg)
-			service.cfg.ForkChoiceStore.Unlock()
 
 			// Check error expectation
 			if tt.expectError {
diff --git a/beacon-chain/blockchain/receive_attestation.go b/beacon-chain/blockchain/receive_attestation.go
index aba81ba87a..e0d2f9abef 100644
--- a/beacon-chain/blockchain/receive_attestation.go
+++ b/beacon-chain/blockchain/receive_attestation.go
@@ -156,15 +156,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
 	}
 	if s.inRegularSync() {
 		fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
-		if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
-			return
-		}
-		go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
 	}
-	if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
-		log.WithError(err).Error("Could not save head")
+	if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
+		return
+	}
+	if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
+		log.WithError(err).Error("Could not update forkchoice")
 	}
-	s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
 }
 
 // This processes fork choice attestations from the pool to account for validator votes and fork choice.
diff --git a/beacon-chain/blockchain/receive_attestation_test.go b/beacon-chain/blockchain/receive_attestation_test.go
index c4afb45ed2..98a5afd8af 100644
--- a/beacon-chain/blockchain/receive_attestation_test.go
+++ b/beacon-chain/blockchain/receive_attestation_test.go
@@ -117,9 +117,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
 	require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
 	roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
 	require.NoError(t, err)
-	service.cfg.ForkChoiceStore.Lock()
 	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
-	service.cfg.ForkChoiceStore.Unlock()
 	copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
 	require.NoError(t, err)
 	require.Equal(t, 2, fcs.NodeCount())
@@ -179,9 +177,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
 	require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
 	roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
 	require.NoError(t, err)
-	service.cfg.ForkChoiceStore.Lock()
 	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
-	service.cfg.ForkChoiceStore.Unlock()
 	require.Equal(t, 2, fcs.NodeCount())
 	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
 	require.Equal(t, tRoot, service.head.root)
diff --git a/changelog/manu-revert-16149.md b/changelog/manu-revert-16149.md
new file mode 100644
index 0000000000..b1a31c7d71
--- /dev/null
+++ b/changelog/manu-revert-16149.md
@@ -0,0 +1,3 @@
+### Ignored
+
+- Revert #16149, which notified the engine about forkchoice updates in the background.
\ No newline at end of file
diff --git a/changelog/potuz_dont_lock_fcu.md b/changelog/potuz_dont_lock_fcu.md
deleted file mode 100644
index 8b3303bb31..0000000000
--- a/changelog/potuz_dont_lock_fcu.md
+++ /dev/null
@@ -1,3 +0,0 @@
-### Changed
-
-- Notify the engine about forkchoice updates in the background.