diff --git a/beacon-chain/blockchain/execution_engine_test.go b/beacon-chain/blockchain/execution_engine_test.go
index 3df1118463..e7eda0532c 100644
--- a/beacon-chain/blockchain/execution_engine_test.go
+++ b/beacon-chain/blockchain/execution_engine_test.go
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
 	require.Equal(t, vhs[0].String(), vh0)
 	require.Equal(t, vhs[1].String(), vh1)
 }
-
-func TestComputePayloadAttribute(t *testing.T) {
-	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
-	ctx := tr.ctx
-
-	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-
-	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
-	// Cache hit, advance state, no fee recipient
-	slot := primitives.Slot(1)
-	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	blk := util.NewBeaconBlockBellatrix()
-	signed, err := consensusblocks.NewSignedBeaconBlock(blk)
-	require.NoError(t, err)
-	roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
-	require.NoError(t, err)
-	cfg := &postBlockProcessConfig{
-		ctx:     ctx,
-		roblock: roblock,
-	}
-	fcu := &fcuConfig{
-		headState:     st,
-		proposingSlot: slot,
-		headRoot:      [32]byte{},
-	}
-	require.NoError(t, service.computePayloadAttributes(cfg, fcu))
-	require.Equal(t, false, fcu.attributes.IsEmpty())
-	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
-
-	// Cache hit, advance state, has fee recipient
-	suggestedAddr := common.HexToAddress("123")
-	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
-	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	require.NoError(t, service.computePayloadAttributes(cfg, fcu))
-	require.Equal(t, false, fcu.attributes.IsEmpty())
-	require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
-}
diff --git a/beacon-chain/blockchain/forkchoice_update_execution.go b/beacon-chain/blockchain/forkchoice_update_execution.go
index 79d9132e74..57b07019b8 100644
--- a/beacon-chain/blockchain/forkchoice_update_execution.go
+++ b/beacon-chain/blockchain/forkchoice_update_execution.go
@@ -12,6 +12,7 @@ import (
 	payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
+	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -53,58 +54,53 @@ type fcuConfig struct {
 }

 // sendFCU handles the logic to notify the engine of a forckhoice update
-// for the first time when processing an incoming block during regular sync. It
-// always updates the shuffling caches and handles epoch transitions when the
-// incoming block is late, preparing payload attributes in this case while it
-// only sends a message with empty attributes for early blocks.
-func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
-	if !s.isNewHead(cfg.headRoot) {
-		return nil
+// when processing an incoming block during regular sync. It
+// always updates the shuffling caches and handles epoch transitions.
+func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
+	if cfg.postState.Version() < version.Fulu {
+		// Update the caches to compute the right proposer index.
+		// This function is called under a forkchoice lock, which we need to release.
+		s.ForkChoicer().Unlock()
+		s.updateCachesPostBlockProcessing(cfg)
+		s.ForkChoicer().Lock()
 	}
+	if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
+		log.WithError(err).Error("Could not get forkchoice update argument")
+		return
+	}
+	// If the head has not been updated and the attributes are empty, we can skip the FCU.
+	if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
+		return
+	}
+	// If we are proposing and aim to reorg the block, we have already sent FCU with attributes in lateBlockTasks.
 	if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() &&
 		s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
-		return nil
+		return
+	}
+	if s.inRegularSync() {
+		go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
 	}
-	return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
-}
-// sendFCUWithAttributes computes the payload attributes and sends an FCU message
-// to the engine if needed
-func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
-	slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
-	defer cancel()
-	cfg.ctx = slotCtx
-	s.cfg.ForkChoiceStore.RLock()
-	defer s.cfg.ForkChoiceStore.RUnlock()
-	if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
-		log.WithError(err).Error("Could not compute payload attributes")
-		return
-	}
-	if fcuArgs.attributes.IsEmpty() {
-		return
-	}
-	if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
-		log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
+	if s.isNewHead(fcuArgs.headRoot) {
+		if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
+			log.WithError(err).Error("Could not save head")
+		}
+		s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
 	}
 }

-// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
-func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
+// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It acquires the forkchoice lock and calls the engine.
+// The caller of this function must NOT hold a lock in forkchoice store.
+func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
 	_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
 	defer span.End()
 	// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
 	ctx = trace.NewContext(s.ctx, span)
+	s.ForkChoicer().Lock()
+	defer s.ForkChoicer().Unlock()
 	_, err := s.notifyForkchoiceUpdate(ctx, args)
 	if err != nil {
-		return errors.Wrap(err, "could not notify forkchoice update")
+		log.WithError(err).Error("Could not notify forkchoice update")
 	}
-
-	if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
-		log.WithError(err).Error("Could not save head")
-	}
-
-	// Only need to prune attestations from pool if the head has changed.
- s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock) - return nil } // shouldOverrideFCU checks whether the incoming block is still subject to being diff --git a/beacon-chain/blockchain/forkchoice_update_execution_test.go b/beacon-chain/blockchain/forkchoice_update_execution_test.go index 2f7746147d..b08d307157 100644 --- a/beacon-chain/blockchain/forkchoice_update_execution_test.go +++ b/beacon-chain/blockchain/forkchoice_update_execution_test.go @@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) { headBlock: wsb, proposingSlot: service.CurrentSlot() + 1, } - require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args)) + service.forkchoiceUpdateWithExecution(ctx, args) payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2}) require.Equal(t, true, has) @@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin headRoot: r, proposingSlot: service.CurrentSlot() + 1, } - require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args)) + service.forkchoiceUpdateWithExecution(ctx, args) } func TestShouldOverrideFCU(t *testing.T) { diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index f3297312d9..94859ac837 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error { startTime := time.Now() fcuArgs := &fcuConfig{} - if s.inRegularSync() { - defer s.handleSecondFCUCall(cfg, fcuArgs) - } if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch { defer s.processLightClientUpdates(cfg) } @@ -105,14 +102,12 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error { s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot) return nil } - if err := s.getFCUArgs(cfg, fcuArgs); err != nil { - log.WithError(err).Error("Could not get forkchoice update argument") - return nil - } - if err := s.sendFCU(cfg, fcuArgs); err != nil { - return errors.Wrap(err, "could not send FCU to engine") - } + s.sendFCU(cfg, fcuArgs) + // Pre-Fulu the caches are updated when computing the payload attributes + if cfg.postState.Version() >= version.Fulu { + go s.updateCachesPostBlockProcessing(cfg) + } return nil } @@ -322,6 +317,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability return nil } +// the caller of this function must not hold a lock in forkchoice store. func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error { e := coreTime.CurrentEpoch(st) if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil { @@ -351,7 +347,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon if e > 0 { e = e - 1 } + s.ForkChoicer().RLock() target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e) + s.ForkChoicer().RUnlock() if err != nil { log.WithError(err).Error("Could not update proposer index state-root map") return nil @@ -364,7 +362,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon } // Epoch boundary tasks: it copies the headState and updates the epoch boundary -// caches. +// caches. The caller of this function must not hold a lock in forkchoice store. 
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error { ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary") defer span.End() @@ -904,8 +902,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) { if currentSlot == s.HeadSlot() { return } - s.cfg.ForkChoiceStore.RLock() - defer s.cfg.ForkChoiceStore.RUnlock() // return early if we are in init sync if !s.inRegularSync() { return @@ -918,14 +914,30 @@ func (s *Service) lateBlockTasks(ctx context.Context) { if lastState == nil { lastRoot, lastState = headRoot[:], headState } - // Copy all the field tries in our cached state in the event of late - // blocks. - lastState.CopyAllTries() - if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil { - log.WithError(err).Debug("Could not update next slot state cache") - } - if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil { - log.WithError(err).Error("Could not update epoch boundary caches") + // Before Fulu we need to process the next slot to find out if we are proposing. + if lastState.Version() < version.Fulu { + // Copy all the field tries in our cached state in the event of late + // blocks. + lastState.CopyAllTries() + if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil { + log.WithError(err).Debug("Could not update next slot state cache") + } + if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil { + log.WithError(err).Error("Could not update epoch boundary caches") + } + } else { + // After Fulu, we can update the caches asynchronously after sending FCU to the engine + defer func() { + go func() { + lastState.CopyAllTries() + if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil { + log.WithError(err).Debug("Could not update next slot state cache") + } + if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil { + log.WithError(err).Error("Could not update epoch boundary caches") + } + }() + }() } // return early if we already started building a block for the current // head root @@ -955,6 +967,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) { headBlock: headBlock, attributes: attribute, } + s.cfg.ForkChoiceStore.Lock() + defer s.cfg.ForkChoiceStore.Unlock() _, err = s.notifyForkchoiceUpdate(ctx, fcuArgs) if err != nil { log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine") diff --git a/beacon-chain/blockchain/process_block_helpers.go b/beacon-chain/blockchain/process_block_helpers.go index df7790752a..3f60e78e3f 100644 --- a/beacon-chain/blockchain/process_block_helpers.go +++ b/beacon-chain/blockchain/process_block_helpers.go @@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil { return err } - if !s.inRegularSync() { - return nil - } - slot := cfg.roblock.Block().Slot() - if slots.WithinVotingWindow(s.genesisTime, slot) { - return nil - } - return s.computePayloadAttributes(cfg, fcuArgs) + fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:]) + return nil } func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { @@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) { // updateCachesPostBlockProcessing updates the next 
slot cache and handles the epoch // boundary in order to compute the right proposer indices after processing -// state transition. This function is called on late blocks while still locked, -// before sending FCU to the engine. -func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error { +// state transition. The caller of this function must not hold a lock in forkchoice store. +func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) { slot := cfg.postState.Slot() root := cfg.roblock.Root() if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil { - return errors.Wrap(err, "could not update next slot state cache") + log.WithError(err).Error("Could not update next slot state cache") + return } if !slots.IsEpochEnd(slot) { - return nil + return } - return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]) -} - -// handleSecondFCUCall handles a second call to FCU when syncing a new block. -// This is useful when proposing in the next block and we want to defer the -// computation of the next slot shuffling. -func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) { - if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() { - go s.sendFCUWithAttributes(cfg, fcuArgs) + if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil { + log.WithError(err).Error("Could not handle epoch boundary") } } @@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) { onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds())) } -// computePayloadAttributes modifies the passed FCU arguments to -// contain the right payload attributes with the tracked proposer. It gets -// called on blocks that arrive after the attestation voting window, or in a -// background routine after syncing early blocks. -func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { - if cfg.roblock.Root() == cfg.headRoot { - if err := s.updateCachesPostBlockProcessing(cfg); err != nil { - return err - } - } - fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:]) - return nil -} - // getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block // to retrieve the state in DB. It verifies the pre state's validity and the incoming block // is in the correct time window. 
diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index 3379266d3d..7ef16025f9 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -816,7 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) { service, tr := minimalTestService(t) signed := &consensusblocks.SignedBeaconBlock{} roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed} + service.cfg.ForkChoiceStore.Lock() err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true}) + service.cfg.ForkChoiceStore.Unlock() require.Equal(t, true, IsInvalidBlock(err)) } @@ -848,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() testState, err = service.cfg.StateGen.StateByRoot(ctx, r) require.NoError(t, err) } @@ -1321,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1333,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1345,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, 
[32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1357,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { lock.Lock() roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() lock.Unlock() wg.Done() }() @@ -1382,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) { require.Equal(t, true, IsInvalidBlock(err)) } -// See the description in #10777 and #10782 for the full setup -// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch -// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns -// INVALID from FCU, with LVH block 17. No head is viable. We check -// that the node is optimistic and that we can actually import a block on top of -// 17 and recover. -func TestStore_NoViableHead_FCU(t *testing.T) { - params.SetupTestConfigCleanup(t) - config := params.BeaconConfig() - config.SlotsPerEpoch = 6 - config.AltairForkEpoch = 1 - config.BellatrixForkEpoch = 2 - params.OverrideBeaconConfig(config) - - mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus} - service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine)) - ctx := tr.ctx - - st, keys := util.DeterministicGenesisState(t, 64) - stateRoot, err := st.HashTreeRoot(ctx) - require.NoError(t, err, "Could not hash genesis state") - - require.NoError(t, service.saveGenesisData(ctx, st)) - - genesis := blocks.NewGenesisBlock(stateRoot[:]) - wsb, err := consensusblocks.NewSignedBeaconBlock(genesis) - require.NoError(t, err) - require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block") - - parentRoot, err := genesis.Block.HashTreeRoot() - require.NoError(t, err, "Could not get signing root") - require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state") - require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state") - - for i := 1; i < 6; i++ { - driftGenesisTime(service, primitives.Slot(i), 0) - st, err := service.HeadState(ctx) - require.NoError(t, err) - b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i)) - require.NoError(t, err) - wsb, err := consensusblocks.NewSignedBeaconBlock(b) - require.NoError(t, err) - root, err := b.Block.HashTreeRoot() - require.NoError(t, err) - - preState, err := service.getBlockPreState(ctx, wsb.Block()) - require.NoError(t, err) - postState, err := service.validateStateTransition(ctx, preState, wsb) - require.NoError(t, err) - require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) - require.NoError(t, err) - require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) - } - - for i := 6; i < 12; i++ { - driftGenesisTime(service, primitives.Slot(i), 0) - st, err := service.HeadState(ctx) - require.NoError(t, err) - b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i)) - require.NoError(t, err) - wsb, err := consensusblocks.NewSignedBeaconBlock(b) - require.NoError(t, err) - root, err := 
b.Block.HashTreeRoot() - require.NoError(t, err) - preState, err := service.getBlockPreState(ctx, wsb.Block()) - require.NoError(t, err) - postState, err := service.validateStateTransition(ctx, preState, wsb) - require.NoError(t, err) - require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) - require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) - } - - for i := 12; i < 18; i++ { - driftGenesisTime(service, primitives.Slot(i), 0) - st, err := service.HeadState(ctx) - require.NoError(t, err) - b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i)) - require.NoError(t, err) - wsb, err := consensusblocks.NewSignedBeaconBlock(b) - require.NoError(t, err) - root, err := b.Block.HashTreeRoot() - require.NoError(t, err) - preState, err := service.getBlockPreState(ctx, wsb.Block()) - require.NoError(t, err) - postState, err := service.validateStateTransition(ctx, preState, wsb) - require.NoError(t, err) - require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) - require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) - } - // Check that we haven't justified the second epoch yet - jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint() - require.Equal(t, primitives.Epoch(0), jc.Epoch) - - // import a block that justifies the second epoch - driftGenesisTime(service, 18, 0) - validHeadState, err := service.HeadState(ctx) - require.NoError(t, err) - b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18) - require.NoError(t, err) - wsb, err = consensusblocks.NewSignedBeaconBlock(b) - require.NoError(t, err) - firstInvalidRoot, err := b.Block.HashTreeRoot() - require.NoError(t, err) - preState, err := service.getBlockPreState(ctx, wsb.Block()) - require.NoError(t, err) - postState, err := service.validateStateTransition(ctx, preState, wsb) - require.NoError(t, err) - require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState)) - roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot) - require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) - jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint() - require.Equal(t, primitives.Epoch(2), jc.Epoch) - - sjc := validHeadState.CurrentJustifiedCheckpoint() - require.Equal(t, primitives.Epoch(0), sjc.Epoch) - lvh := b.Block.Body.ExecutionPayload.ParentHash - // check our head - require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) - - // import another block to find out that it was invalid - mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh} - service.cfg.ExecutionEngineCaller = mockEngine - driftGenesisTime(service, 19, 0) - st, err = service.HeadState(ctx) - require.NoError(t, err) - b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19) - require.NoError(t, err) - wsb, err = consensusblocks.NewSignedBeaconBlock(b) - require.NoError(t, err) - root, err := b.Block.HashTreeRoot() - require.NoError(t, 
err) - preState, err = service.getBlockPreState(ctx, wsb.Block()) - require.NoError(t, err) - postState, err = service.validateStateTransition(ctx, preState, wsb) - require.NoError(t, err) - require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) - require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.ErrorContains(t, "received an INVALID payload from execution engine", err) - // Check that forkchoice's head is the last invalid block imported. The - // store's headroot is the previous head (since the invalid block did - // not finish importing) one and that the node is optimistic - require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) - headRoot, err := service.HeadRoot(ctx) - require.NoError(t, err) - require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot)) - optimistic, err := service.IsOptimistic(ctx) - require.NoError(t, err) - require.Equal(t, true, optimistic) - - // import another block based on the last valid head state - mockEngine = &mockExecution.EngineClient{} - service.cfg.ExecutionEngineCaller = mockEngine - driftGenesisTime(service, 20, 0) - b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20) - require.NoError(t, err) - wsb, err = consensusblocks.NewSignedBeaconBlock(b) - require.NoError(t, err) - root, err = b.Block.HashTreeRoot() - require.NoError(t, err) - - preState, err = service.getBlockPreState(ctx, wsb.Block()) - require.NoError(t, err) - postState, err = service.validateStateTransition(ctx, preState, wsb) - require.NoError(t, err) - require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) - require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) - require.NoError(t, err) - // Check the newly imported block is head, it justified the right - // checkpoint and the node is no longer optimistic - require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) - sjc = service.CurrentJustifiedCheckpt() - require.Equal(t, jc.Epoch, sjc.Epoch) - require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root)) - optimistic, err = service.IsOptimistic(ctx) - require.NoError(t, err) - require.Equal(t, false, optimistic) -} - // See the description in #10777 and #10782 for the full setup // We sync optimistically a chain of blocks. Block 17 is the last block in Epoch // 2. 
Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns @@ -1624,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } for i := 6; i < 12; i++ { @@ -1644,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } for i := 12; i < 18; i++ { @@ -1666,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } // Check that we haven't justified the second epoch yet jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint() @@ -1690,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint() require.Equal(t, primitives.Epoch(2), jc.Epoch) @@ -1700,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { lvh := b.Block.Body.ExecutionPayload.ParentHash // check our head require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) + isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot) + require.NoError(t, err) + require.Equal(t, true, isBlock18OptimisticAfterImport) + time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed // import another block to find out that it was invalid mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh} @@ -1750,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) // Check the newly imported block is head, it justified the right // checkpoint and the node is no longer optimistic @@ 
-1817,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } for i := 6; i < 12; i++ { @@ -1838,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } // import the merge block @@ -1859,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) // save the post state and the payload Hash of this block since it will // be the LVH @@ -1888,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13]) require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } // Check that we have justified the second epoch jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint() @@ -1957,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})) + service.cfg.ForkChoiceStore.Unlock() // Check that the head is still INVALID and the node is still optimistic require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) optimistic, err = service.IsOptimistic(ctx) @@ -1982,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) st, err = service.cfg.StateGen.StateByRoot(ctx, root) require.NoError(t, err) @@ -2010,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err = 
consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) sjc = service.CurrentJustifiedCheckpt() @@ -2054,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state") for i := 1; i < 6; i++ { - t.Log(i) driftGenesisTime(service, primitives.Slot(i), 0) st, err := service.HeadState(ctx) require.NoError(t, err) @@ -2071,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } for i := 6; i < 12; i++ { @@ -2091,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) - err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) - require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() } // import the merge block @@ -2112,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, err) // save the post state and the payload Hash of this block since it will // be the LVH @@ -2143,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -2264,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() st, err = service.HeadState(ctx) require.NoError(t, err) @@ -2330,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := 
consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() st, err = service.HeadState(ctx) require.NoError(t, err) @@ -2613,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) { require.NoError(t, err) // Rollback block insertion into db and caches. - require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Lock() + err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}) + service.cfg.ForkChoiceStore.Unlock() + require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err) // The block should no longer exist. require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root)) @@ -2714,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34) require.NoError(t, err) @@ -2748,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) { require.NoError(t, postState.SetFinalizedCheckpoint(cj)) // Rollback block insertion into db and caches. - require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Lock() + err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}) + service.cfg.ForkChoiceStore.Unlock() + require.ErrorContains(t, "context canceled", err) // The block should no longer exist. 
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root)) @@ -3244,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) { } // Execute postBlockProcess + service.cfg.ForkChoiceStore.Lock() err = service.postBlockProcess(cfg) + service.cfg.ForkChoiceStore.Unlock() // Check error expectation if tt.expectError { diff --git a/beacon-chain/blockchain/receive_attestation.go b/beacon-chain/blockchain/receive_attestation.go index e0d2f9abef..aba81ba87a 100644 --- a/beacon-chain/blockchain/receive_attestation.go +++ b/beacon-chain/blockchain/receive_attestation.go @@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) } if s.inRegularSync() { fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:]) + if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) { + return + } + go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs) } - if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) { - return - } - if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil { - log.WithError(err).Error("Could not update forkchoice") + if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil { + log.WithError(err).Error("Could not save head") } + s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock) } // This processes fork choice attestations from the pool to account for validator votes and fork choice. diff --git a/beacon-chain/blockchain/receive_attestation_test.go b/beacon-chain/blockchain/receive_attestation_test.go index 98a5afd8af..c4afb45ed2 100644 --- a/beacon-chain/blockchain/receive_attestation_test.go +++ b/beacon-chain/blockchain/receive_attestation_test.go @@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState)) roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot) require.NoError(t, err) require.Equal(t, 2, fcs.NodeCount()) @@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) { require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState)) roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot) require.NoError(t, err) + service.cfg.ForkChoiceStore.Lock() require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})) + service.cfg.ForkChoiceStore.Unlock() require.Equal(t, 2, fcs.NodeCount()) require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb)) require.Equal(t, tRoot, service.head.root) diff --git a/changelog/potuz_dont_lock_fcu.md b/changelog/potuz_dont_lock_fcu.md new file mode 100644 index 0000000000..8b3303bb31 --- /dev/null +++ b/changelog/potuz_dont_lock_fcu.md @@ -0,0 +1,3 @@ +### Changed + +- Notify the engine about forkchoice updates in the background.