From 918129cf36fb9b7331ccf3e327368164d77980ba Mon Sep 17 00:00:00 2001 From: kasey <489222+kasey@users.noreply.github.com> Date: Tue, 2 May 2023 23:34:01 -0500 Subject: [PATCH] Replace statefeed Initialize (#12285) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor initialization to blocking startup method * require genesisSetter in blockchain, fix tests * work-around gazelle weirdness * fix dep gazelle ignores * only call SetGenesis once * fix typo * validator test setup and fix to return right error * move waitForChainStart to Start * wire up sync Service.genesisWaiter * fix p2p genesisWaiter plumbing * remove extra clock type, integrate into genesis and rename * use time.Now when no Nower is specified * remove unused ClockSetter * simplify rpc context checking * fix typo * use clock everywhere in sync; [32]byte val root * don't use DeepEqual to compare [32]byte and []byte * don't use clock in init sync, not wired up yet * use clock waiter in blockchain as well * use cancelable contexts in tests with goroutines * missed a reference to WithClockSetter * Update beacon-chain/startup/genesis.go Co-authored-by: Radosław Kapka * Update beacon-chain/blockchain/service_test.go Co-authored-by: Radosław Kapka * more clear docs * doc for NewClock * move clock typedef to more logical file name * adding documentation * gaz * fixes for capella * reducing test raciness * fix races in committee cache tests * lint * add tests on Duration slot math helper * startup package test coverage * fix bad merge * set non-zero genesis time in tests that call Start * happy deepsource, happy me-epsource * replace Synced event with channel * remove unused error * remove accidental wip commit * gaz! 
* remove unused event constants * remove sync statefeed subscription to fix deadlock * remove state notifier * fix build --------- Co-authored-by: Kasey Kirkham Co-authored-by: Radosław Kapka Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> Co-authored-by: nisdas --- beacon-chain/blockchain/BUILD.bazel | 3 + beacon-chain/blockchain/chain_info.go | 8 +- beacon-chain/blockchain/chain_info_test.go | 62 +-- .../blockchain/execution_engine_test.go | 154 ++----- .../forkchoice_update_execution_test.go | 30 +- beacon-chain/blockchain/head_test.go | 15 +- beacon-chain/blockchain/mock_test.go | 6 +- beacon-chain/blockchain/options.go | 9 + beacon-chain/blockchain/pow_block_test.go | 39 +- .../blockchain/process_attestation_test.go | 52 +-- beacon-chain/blockchain/process_block.go | 20 +- beacon-chain/blockchain/process_block_test.go | 408 +++--------------- .../blockchain/receive_attestation.go | 17 +- .../blockchain/receive_attestation_test.go | 64 +-- beacon-chain/blockchain/receive_block_test.go | 88 +--- beacon-chain/blockchain/service.go | 36 +- beacon-chain/blockchain/service_test.go | 160 +++---- beacon-chain/blockchain/setup_test.go | 115 +++++ beacon-chain/core/feed/state/events.go | 8 +- beacon-chain/core/helpers/BUILD.bazel | 1 + beacon-chain/core/helpers/main_test.go | 13 + beacon-chain/monitor/process_block_test.go | 39 +- beacon-chain/monitor/service.go | 37 +- beacon-chain/monitor/service_test.go | 77 +--- beacon-chain/node/BUILD.bazel | 1 + beacon-chain/node/node.go | 42 +- beacon-chain/node/node_test.go | 2 +- beacon-chain/p2p/BUILD.bazel | 6 +- beacon-chain/p2p/config.go | 2 + beacon-chain/p2p/discovery_test.go | 19 +- beacon-chain/p2p/fork_test.go | 2 - beacon-chain/p2p/pubsub_filter_test.go | 24 +- beacon-chain/p2p/pubsub_test.go | 5 +- beacon-chain/p2p/service.go | 55 +-- beacon-chain/p2p/service_test.go | 86 ++-- beacon-chain/p2p/subnets_test.go | 19 +- beacon-chain/rpc/BUILD.bazel | 1 + 
.../rpc/prysm/v1alpha1/validator/BUILD.bazel | 1 + .../rpc/prysm/v1alpha1/validator/server.go | 41 +- .../prysm/v1alpha1/validator/server_test.go | 36 +- beacon-chain/rpc/service.go | 3 + beacon-chain/slasher/BUILD.bazel | 5 +- .../slasher/detect_attestations_test.go | 11 + beacon-chain/slasher/detect_blocks_test.go | 3 + beacon-chain/slasher/receive_test.go | 5 + beacon-chain/slasher/service.go | 43 +- beacon-chain/slasher/service_test.go | 11 +- beacon-chain/startup/BUILD.bazel | 31 ++ beacon-chain/startup/clock.go | 75 ++++ beacon-chain/startup/clock_test.go | 49 +++ beacon-chain/startup/synchronizer.go | 60 +++ beacon-chain/startup/synchronizer_test.go | 51 +++ beacon-chain/sync/BUILD.bazel | 4 +- .../sync/broadcast_bls_changes_test.go | 3 - beacon-chain/sync/context.go | 40 +- beacon-chain/sync/context_test.go | 4 +- beacon-chain/sync/decode_pubsub.go | 2 +- beacon-chain/sync/decode_pubsub_test.go | 4 +- beacon-chain/sync/error.go | 3 + beacon-chain/sync/fork_watcher.go | 8 +- beacon-chain/sync/fork_watcher_test.go | 76 ++-- beacon-chain/sync/initial-sync/BUILD.bazel | 6 +- .../sync/initial-sync/blocks_fetcher.go | 2 + .../sync/initial-sync/blocks_fetcher_test.go | 55 +-- .../sync/initial-sync/initial_sync_test.go | 7 +- beacon-chain/sync/initial-sync/service.go | 96 ++--- .../sync/initial-sync/service_test.go | 162 +++---- beacon-chain/sync/metrics.go | 6 +- beacon-chain/sync/options.go | 23 +- .../sync/pending_attestations_queue.go | 4 +- .../sync/pending_attestations_queue_test.go | 66 +-- beacon-chain/sync/pending_blocks_queue.go | 10 +- .../sync/pending_blocks_queue_test.go | 25 +- beacon-chain/sync/rpc.go | 2 +- .../sync/rpc_beacon_blocks_by_range.go | 2 +- .../sync/rpc_beacon_blocks_by_range_test.go | 53 ++- .../sync/rpc_beacon_blocks_by_root.go | 2 +- .../sync/rpc_beacon_blocks_by_root_test.go | 24 +- beacon-chain/sync/rpc_chunked_response.go | 42 +- beacon-chain/sync/rpc_goodbye.go | 2 +- beacon-chain/sync/rpc_goodbye_test.go | 9 +- 
beacon-chain/sync/rpc_metadata.go | 14 +- beacon-chain/sync/rpc_metadata_test.go | 27 +- beacon-chain/sync/rpc_ping.go | 2 +- beacon-chain/sync/rpc_ping_test.go | 9 +- beacon-chain/sync/rpc_send_request.go | 12 +- beacon-chain/sync/rpc_send_request_test.go | 61 +-- beacon-chain/sync/rpc_status.go | 8 +- beacon-chain/sync/rpc_status_test.go | 325 ++++++++------ beacon-chain/sync/service.go | 101 ++--- beacon-chain/sync/service_test.go | 131 ++---- beacon-chain/sync/subscriber.go | 24 +- beacon-chain/sync/subscriber_test.go | 145 ++++--- beacon-chain/sync/sync_fuzz_test.go | 4 + beacon-chain/sync/validate_aggregate_proof.go | 2 +- .../sync/validate_aggregate_proof_test.go | 54 +-- .../sync/validate_attester_slashing_test.go | 17 +- .../sync/validate_beacon_attestation.go | 2 +- .../sync/validate_beacon_attestation_test.go | 8 +- beacon-chain/sync/validate_beacon_blocks.go | 4 +- .../sync/validate_beacon_blocks_test.go | 38 +- .../validate_bls_to_execution_change_test.go | 110 +++-- .../sync/validate_proposer_slashing_test.go | 5 +- .../sync/validate_sync_committee_message.go | 2 +- .../validate_sync_committee_message_test.go | 170 ++++---- .../sync/validate_sync_contribution_proof.go | 2 +- .../validate_sync_contribution_proof_test.go | 209 ++++----- .../sync/validate_voluntary_exit_test.go | 5 +- cmd/prysmctl/testnet/generate_genesis.go | 3 + runtime/interop/BUILD.bazel | 1 + runtime/interop/premine-state.go | 131 ++++-- testing/endtoend/BUILD.bazel | 1 + .../endtoend/slasher_simulator_e2e_test.go | 4 + testing/slasher/simulator/BUILD.bazel | 2 +- testing/slasher/simulator/simulator.go | 13 +- .../shared/common/forkchoice/BUILD.bazel | 1 + .../shared/common/forkchoice/service.go | 2 + time/slots/slottime.go | 10 +- time/slots/slottime_test.go | 72 ++++ 119 files changed, 2207 insertions(+), 2416 deletions(-) create mode 100644 beacon-chain/blockchain/setup_test.go create mode 100644 beacon-chain/core/helpers/main_test.go create mode 100644 
beacon-chain/startup/BUILD.bazel create mode 100644 beacon-chain/startup/clock.go create mode 100644 beacon-chain/startup/clock_test.go create mode 100644 beacon-chain/startup/synchronizer.go create mode 100644 beacon-chain/startup/synchronizer_test.go diff --git a/beacon-chain/blockchain/BUILD.bazel b/beacon-chain/blockchain/BUILD.bazel index 7b02d71922..fab212e329 100644 --- a/beacon-chain/blockchain/BUILD.bazel +++ b/beacon-chain/blockchain/BUILD.bazel @@ -58,6 +58,7 @@ go_library( "//beacon-chain/operations/slashings:go_default_library", "//beacon-chain/operations/voluntaryexits:go_default_library", "//beacon-chain/p2p:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//config/features:go_default_library", @@ -119,6 +120,7 @@ go_test( "receive_attestation_test.go", "receive_block_test.go", "service_test.go", + "setup_test.go", "weak_subjectivity_checks_test.go", ], embed = [":go_default_library"], @@ -168,6 +170,7 @@ go_test( "mock_test.go", "receive_block_test.go", "service_norace_test.go", + "setup_test.go", ], embed = [":go_default_library"], gc_goopts = [ diff --git a/beacon-chain/blockchain/chain_info.go b/beacon-chain/blockchain/chain_info.go index 08e7a280fc..b4ca3dac08 100644 --- a/beacon-chain/blockchain/chain_info.go +++ b/beacon-chain/blockchain/chain_info.go @@ -85,6 +85,12 @@ type ForkFetcher interface { TimeFetcher } +// TemporalOracle is like ForkFetcher minus CurrentFork() +type TemporalOracle interface { + GenesisFetcher + TimeFetcher +} + // CanonicalFetcher retrieves the current chain's canonical information. type CanonicalFetcher interface { IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) @@ -327,7 +333,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti } // IsOptimistic returns true if the current head is optimistic. 
-func (s *Service) IsOptimistic(ctx context.Context) (bool, error) { +func (s *Service) IsOptimistic(_ context.Context) (bool, error) { if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch { return false, nil } diff --git a/beacon-chain/blockchain/chain_info_test.go b/beacon-chain/blockchain/chain_info_test.go index 45f73fdaee..660471dd39 100644 --- a/beacon-chain/blockchain/chain_info_test.go +++ b/beacon-chain/blockchain/chain_info_test.go @@ -10,7 +10,6 @@ import ( forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -72,16 +71,8 @@ func TestHeadRoot_Nil(t *testing.T) { } func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fcs), - WithStateGen(stategen.New(beaconDB, fcs)), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, fcs := tr.ctx, tr.fcs gs, _ := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -97,16 +88,8 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) { } func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fcs), - WithStateGen(stategen.New(beaconDB, fcs)), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs jroot := [32]byte{'j'} cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot} @@ -120,16 +103,8 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) { } func TestFinalizedBlockHash(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fcs), - WithStateGen(stategen.New(beaconDB, fcs)), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs r := [32]byte{'f'} cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r} @@ -170,16 +145,9 @@ func TestHeadSlot_CanRetrieve(t *testing.T) { } func TestHeadRoot_CanRetrieve(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fcs), - WithStateGen(stategen.New(beaconDB, fcs)), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx + gs, _ := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -189,16 +157,8 @@ func TestHeadRoot_CanRetrieve(t *testing.T) { } func TestHeadRoot_UseDB(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fcs), - WithStateGen(stategen.New(beaconDB, fcs)), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db service.head = &head{root: params.BeaconConfig().ZeroHash} b := util.NewBeaconBlock() diff --git a/beacon-chain/blockchain/execution_engine_test.go b/beacon-chain/blockchain/execution_engine_test.go index 0462e82b95..11b8842e0e 100644 --- a/beacon-chain/blockchain/execution_engine_test.go +++ b/beacon-chain/blockchain/execution_engine_test.go @@ -9,14 +9,11 @@ import ( gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" - testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution" mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" bstate "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/features" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -33,23 +30,16 @@ import ( ) func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs + altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair()) altairBlkRoot, err := altairBlk.Block().HashTreeRoot() require.NoError(t, err) bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix()) bellatrixBlkRoot, err := 
bellatrixBlk.Block().HashTreeRoot() require.NoError(t, err) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + st, _ := util.DeterministicGenesisState(t, 10) service.head = &head{ state: st, @@ -96,23 +86,15 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) { } func Test_NotifyForkchoiceUpdate(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs + altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair()) altairBlkRoot, err := altairBlk.Block().HashTreeRoot() require.NoError(t, err) bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix()) bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot() require.NoError(t, err) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) st, _ := util.DeterministicGenesisState(t, 10) service.head = &head{ state: st, @@ -264,8 +246,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) { } func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs // Prepare blocks ba := util.NewBeaconBlockBellatrix() @@ -297,12 +279,6 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) { brd, err := wbd.Block().HashTreeRoot() require.NoError(t, err) - // Insert blocks into forkchoice - service := setupBeaconChain(t, beaconDB) - fcs := doublylinkedtree.New() - service.cfg.ForkChoiceStore = fcs - service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache() - fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil }) require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{})) ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} @@ -358,8 +334,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) { // 3. the blockchain package calls fcu to obtain heads G -> F -> D. 
func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs // Prepare blocks ba := util.NewBeaconBlockBellatrix() @@ -414,12 +390,6 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) { brg, err := wbg.Block().HashTreeRoot() require.NoError(t, err) - // Insert blocks into forkchoice - service := setupBeaconChain(t, beaconDB) - fcs := doublylinkedtree.New() - service.cfg.ForkChoiceStore = fcs - service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache() - fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil }) require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{})) ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} @@ -497,15 +467,9 @@ func Test_NotifyNewPayload(t *testing.T) { cfg := params.BeaconConfig() cfg.TerminalTotalDifficulty = "2" params.OverrideBeaconConfig(cfg) + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx, fcs := tr.ctx, tr.fcs - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } phase0State, _ := util.DeterministicGenesisState(t, 1) altairState, _ := util.DeterministicGenesisStateAltair(t, 1) bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2) @@ -536,8 +500,6 @@ func Test_NotifyNewPayload(t *testing.T) { } bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk)) require.NoError(t, err) - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB)) service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) r, err := bellatrixBlk.Block().HashTreeRoot() @@ -744,14 +706,10 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) { cfg := params.BeaconConfig() cfg.TerminalTotalDifficulty = "2" params.OverrideBeaconConfig(cfg) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } + + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx := tr.ctx + bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2) blk := ðpb.SignedBeaconBlockBellatrix{ Block: ðpb.BeaconBlockBellatrix{ @@ -764,8 +722,6 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) { } bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - service, err := NewService(ctx, opts...) - require.NoError(t, err) e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}} e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{ Header: gethtypes.Header{ @@ -788,17 +744,9 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) { } func Test_GetPayloadAttribute(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx := tr.ctx - // Cache miss - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) st, _ := util.DeterministicGenesisStateBellatrix(t, 1) hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{}) require.Equal(t, false, hasPayload) @@ -826,22 +774,15 @@ func Test_GetPayloadAttribute(t *testing.T) { } func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) { - ctx := context.Background() + hook := logTest.NewGlobal() resetCfg := features.InitWithReset(&features.Flags{ PrepareAllPayloads: true, }) defer resetCfg() - beaconDB := testDB.SetupDB(t) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx := tr.ctx - hook := logTest.NewGlobal() - service, err := NewService(ctx, opts...) - require.NoError(t, err) st, _ := util.DeterministicGenesisStateBellatrix(t, 1) hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0, []byte{}) require.Equal(t, true, hasPayload) @@ -851,17 +792,9 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) { } func Test_GetPayloadAttributeV2(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx := tr.ctx - // Cache miss - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) st, _ := util.DeterministicGenesisStateCapella(t, 1) hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{}) require.Equal(t, false, hasPayload) @@ -897,18 +830,9 @@ func Test_GetPayloadAttributeV2(t *testing.T) { func Test_UpdateLastValidatedCheckpoint(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MainnetConfig()) + service, tr := minimalTestService(t) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - stateGen := stategen.New(beaconDB, fcs) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stateGen), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) var genesisStateRoot [32]byte genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:]) util.SaveBlock(t, ctx, beaconDB, genesisBlk) @@ -1013,16 +937,8 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) { } func TestService_removeInvalidBlockAndState(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx // Deleting unknown block should not error. require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}})) @@ -1066,18 +982,10 @@ func TestService_removeInvalidBlockAndState(t *testing.T) { } func TestService_getPayloadHash(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx - _, err = service.getPayloadHash(ctx, []byte{}) + _, err := service.getPayloadHash(ctx, []byte{}) require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err) b := util.NewBeaconBlock() diff --git a/beacon-chain/blockchain/forkchoice_update_execution_test.go b/beacon-chain/blockchain/forkchoice_update_execution_test.go index d727e2f5f2..95e7ad0dca 100644 --- a/beacon-chain/blockchain/forkchoice_update_execution_test.go +++ b/beacon-chain/blockchain/forkchoice_update_execution_test.go @@ -8,8 +8,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -145,23 +143,15 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) { } func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs + altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair()) altairBlkRoot, err := altairBlk.Block().HashTreeRoot() require.NoError(t, err) bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix()) bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot() require.NoError(t, err) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - 
} - service, err := NewService(ctx, opts...) - require.NoError(t, err) st, _ := util.DeterministicGenesisState(t, 10) service.head = &head{ state: st, @@ -200,18 +190,10 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin func TestShouldOverrideFCU(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) + service, tr := minimalTestService(t) + ctx, fcs := tr.ctx, tr.fcs + service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second)) - require.NoError(t, err) headRoot := [32]byte{'b'} parentRoot := [32]byte{'a'} ojc := ðpb.Checkpoint{} diff --git a/beacon-chain/blockchain/head_test.go b/beacon-chain/blockchain/head_test.go index 809247c7b6..0a2d1467dd 100644 --- a/beacon-chain/blockchain/head_test.go +++ b/beacon-chain/blockchain/head_test.go @@ -9,10 +9,8 @@ import ( mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -581,18 +579,9 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) { } func TestUpdateHead_noSavedChanges(t *testing.T) { - ctx := context.Background() + service, tr := 
minimalTestService(t) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - - service, err := NewService(ctx, opts...) - require.NoError(t, err) ojp := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp) require.NoError(t, err) diff --git a/beacon-chain/blockchain/mock_test.go b/beacon-chain/blockchain/mock_test.go index bc2b88d9f7..fe5ce86779 100644 --- a/beacon-chain/blockchain/mock_test.go +++ b/beacon-chain/blockchain/mock_test.go @@ -5,16 +5,19 @@ import ( testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" ) func testServiceOptsWithDB(t *testing.T) []Option { beaconDB := testDB.SetupDB(t) fcs := doublylinkedtree.New() + cs := startup.NewClockSynchronizer() return []Option{ WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB, fcs)), WithForkChoiceStore(fcs), + WithClockSynchronizer(cs), } } @@ -22,5 +25,6 @@ func testServiceOptsWithDB(t *testing.T) []Option { // in your code path. this is a lightweight way to satisfy the stategen/beacondb // initialization requirements w/o the overhead of db init. 
func testServiceOptsNoDB() []Option { - return []Option{} + cs := startup.NewClockSynchronizer() + return []Option{WithClockSynchronizer(cs)} } diff --git a/beacon-chain/blockchain/options.go b/beacon-chain/blockchain/options.go index 3ab69b5318..2ad8cce8ff 100644 --- a/beacon-chain/blockchain/options.go +++ b/beacon-chain/blockchain/options.go @@ -13,6 +13,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" @@ -163,3 +164,11 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option { return nil } } + +func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option { + return func(s *Service) error { + s.clockSetter = gs + s.clockWaiter = gs + return nil + } +} diff --git a/beacon-chain/blockchain/pow_block_test.go b/beacon-chain/blockchain/pow_block_test.go index 0509ec7b05..78947372da 100644 --- a/beacon-chain/blockchain/pow_block_test.go +++ b/beacon-chain/blockchain/pow_block_test.go @@ -1,17 +1,13 @@ package blockchain import ( - "context" "fmt" "math/big" "testing" gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/holiman/uint256" - testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" @@ -108,16 +104,8 @@ func 
Test_validateMergeBlock(t *testing.T) { cfg.TerminalTotalDifficulty = "2" params.OverrideBeaconConfig(cfg) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}} service.cfg.ExecutionEngineCaller = engine @@ -158,16 +146,8 @@ func Test_validateMergeBlock(t *testing.T) { } func Test_getBlkParentHashAndTD(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}} service.cfg.ExecutionEngineCaller = engine @@ -239,14 +219,9 @@ func Test_validateTerminalBlockHash(t *testing.T) { require.NoError(t, err) require.Equal(t, true, ok) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx + blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(ðpb.SignedBeaconBlockBellatrix{})) require.NoError(t, err) blk.SetSlot(1) diff --git a/beacon-chain/blockchain/process_attestation_test.go b/beacon-chain/blockchain/process_attestation_test.go index dc5fae1700..5d08a9d671 100644 --- a/beacon-chain/blockchain/process_attestation_test.go +++ b/beacon-chain/blockchain/process_attestation_test.go @@ -6,9 +6,6 @@ import ( "time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" - testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -21,19 +18,10 @@ import ( ) func TestStore_OnAttestation_ErrorConditions(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fc), - WithStateGen(stategen.New(beaconDB, fc)), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) - - _, err = blockTree1(t, beaconDB, []byte{'g'}) + _, err := blockTree1(t, beaconDB, []byte{'g'}) require.NoError(t, err) blkWithoutState := util.NewBeaconBlock() @@ -128,17 +116,9 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) { } func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t) + ctx := tr.ctx - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) genesisState, pks := util.DeterministicGenesisState(t, 64) service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)) require.NoError(t, service.saveGenesisData(ctx, genesisState)) @@ -158,15 +138,8 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) { } func TestStore_SaveCheckpointState(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx s, err := util.NewBeaconState() require.NoError(t, err) @@ -220,15 +193,8 @@ func TestStore_SaveCheckpointState(t *testing.T) { } func TestStore_UpdateCheckpointState(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx epoch := primitives.Epoch(1) baseState, _ := util.DeterministicGenesisState(t, 1) diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index 8bdc79cd4b..8852abd49f 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -6,7 +6,6 @@ import ( "time" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" @@ -652,28 +651,21 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion // This routine checks if there is a cached proposer payload ID available for the next slot proposer. // If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID. -func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) { - // Wait for state to be initialized. 
- stateChannel := make(chan *feed.Event, 1) - stateSub := stateFeed.Subscribe(stateChannel) +func (s *Service) spawnLateBlockTasksLoop() { go func() { - select { - case <-s.ctx.Done(): - stateSub.Unsubscribe() + _, err := s.clockWaiter.WaitForClock(s.ctx) + if err != nil { + log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization") return - case <-stateChannel: - stateSub.Unsubscribe() - break } - attThreshold := params.BeaconConfig().SecondsPerSlot / 3 ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot) for { select { case <-ticker.C(): - s.lateBlockTasks(ctx) + s.lateBlockTasks(s.ctx) - case <-ctx.Done(): + case <-s.ctx.Done(): log.Debug("Context closed, exiting routine") return } diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index 327e812b03..08d703d671 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -12,9 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" - mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" @@ -24,9 +22,7 @@ import ( mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing" doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/features" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -46,18 +42,9 @@ import ( ) func TestStore_OnBlock(t *testing.T) { - ctx := context.Background() + service, tr := minimalTestService(t) + ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - - service, err := NewService(ctx, opts...) - require.NoError(t, err) var genesisStateRoot [32]byte genesis := blocks.NewGenesisBlock(genesisStateRoot[:]) util.SaveBlock(t, ctx, beaconDB, genesis) @@ -152,17 +139,8 @@ func TestStore_OnBlock(t *testing.T) { } func TestStore_OnBlockBatch(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx st, keys := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, st)) @@ -185,7 +163,7 @@ func TestStore_OnBlockBatch(t *testing.T) { blks = append(blks, wsb) blkRoots = append(blkRoots, root) } - err = service.onBlockBatch(ctx, blks, blkRoots[1:]) + err := service.onBlockBatch(ctx, blks, blkRoots[1:]) require.ErrorIs(t, errWrongBlockCount, err) err = service.onBlockBatch(ctx, blks, blkRoots) require.NoError(t, err) @@ -196,17 +174,9 @@ func TestStore_OnBlockBatch(t *testing.T) { } func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, tr := minimalTestService(t) + ctx := tr.ctx - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) st, keys := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, st)) bState := st.Copy() @@ -227,22 +197,12 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) { blks = append(blks, wsb) blkRoots = append(blkRoots, root) } - err = service.onBlockBatch(ctx, blks, blkRoots) - require.NoError(t, err) + require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots)) } func TestCachedPreState_CanGetFromStateSummary(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db st, keys := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, st)) @@ -260,16 +220,8 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) { } func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) - service.cfg.ForkChoiceStore = doublylinkedtree.New() + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db st, _ := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, st)) @@ -309,16 +261,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) { } func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) - service.cfg.ForkChoiceStore = doublylinkedtree.New() + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db st, _ := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, st)) @@ -360,16 +304,8 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) { } func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) - service.cfg.ForkChoiceStore = doublylinkedtree.New() + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db var genesisStateRoot [32]byte genesis := blocks.NewGenesisBlock(genesisStateRoot[:]) @@ -418,17 +354,8 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) { } func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) - service.cfg.ForkChoiceStore = doublylinkedtree.New() + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db var genesisStateRoot [32]byte genesis := blocks.NewGenesisBlock(genesisStateRoot[:]) @@ -566,17 +493,8 @@ func TestAncestorByDB_CtxErr(t *testing.T) { } func TestAncestor_HandleSkipSlot(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + beaconDB := tr.db b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -657,17 +575,8 @@ func TestAncestor_CanUseForkchoice(t *testing.T) { } func TestAncestor_CanUseDB(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, beaconDB := tr.ctx, tr.db b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -732,21 +641,8 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) { } func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - depositCache, err := depositcache.New() - require.NoError(t, err) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithDepositCache(depositCache), - WithStateNotifier(&mock.MockStateNotifier{}), - WithAttestationPool(attestations.NewPool()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, fcs := tr.ctx, tr.fcs gs, keys := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -782,21 +678,8 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) { } func TestOnBlock_CanFinalize(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - depositCache, err := depositcache.New() - require.NoError(t, err) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithDepositCache(depositCache), - WithStateNotifier(&mock.MockStateNotifier{}), - WithAttestationPool(attestations.NewPool()), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx gs, keys := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -830,39 +713,15 @@ func TestOnBlock_CanFinalize(t *testing.T) { } func TestOnBlock_NilBlock(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - depositCache, err := depositcache.New() - require.NoError(t, err) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithDepositCache(depositCache), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) - err = service.onBlock(ctx, nil, [32]byte{}) + err := service.onBlock(tr.ctx, nil, [32]byte{}) require.Equal(t, true, IsInvalidBlock(err)) } func TestOnBlock_InvalidSignature(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - depositCache, err := depositcache.New() - require.NoError(t, err) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithDepositCache(depositCache), - WithStateNotifier(&mock.MockStateNotifier{}), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx gs, keys := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -885,21 +744,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) { config.BellatrixForkEpoch = 2 params.OverrideBeaconConfig(config) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - depositCache, err := depositcache.New() - require.NoError(t, err) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithDepositCache(depositCache), - WithStateNotifier(&mock.MockStateNotifier{}), - WithAttestationPool(attestations.NewPool()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx gs, keys := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -918,13 +764,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) { } func TestInsertFinalizedDeposits(t *testing.T) { - ctx := context.Background() - opts := testServiceOptsWithDB(t) - depositCache, err := depositcache.New() - require.NoError(t, err) - opts = append(opts, WithDepositCache(depositCache)) - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, depositCache := tr.ctx, tr.dc gs, _ := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -952,13 +793,8 @@ func TestInsertFinalizedDeposits(t *testing.T) { } func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) { - ctx := context.Background() - opts := testServiceOptsWithDB(t) - depositCache, err := depositcache.New() - require.NoError(t, err) - opts = append(opts, WithDepositCache(depositCache)) - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx, depositCache := tr.ctx, tr.dc gs, _ := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -1085,18 +921,8 @@ func Test_validateMergeTransitionBlock(t *testing.T) { cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash params.OverrideBeaconConfig(cfg) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - WithAttestationPool(attestations.NewPool()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache())) + ctx := tr.ctx aHash := common.BytesToHash([]byte("a")) bHash := common.BytesToHash([]byte("b")) @@ -1223,17 +1049,8 @@ func Test_validateMergeTransitionBlock(t *testing.T) { } func TestService_insertSlashingsToForkChoiceStore(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx beaconState, privKeys := util.DeterministicGenesisState(t, 100) att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{ @@ -1274,21 +1091,8 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) { } func TestOnBlock_ProcessBlocksParallel(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - depositCache, err := depositcache.New() - require.NoError(t, err) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - WithDepositCache(depositCache), - WithStateNotifier(&mock.MockStateNotifier{}), - WithAttestationPool(attestations.NewPool()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx gs, keys := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -1353,17 +1157,8 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { } func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) + service, _ := minimalTestService(t) - fcs := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fcs)), - WithForkChoiceStore(fcs), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1})) blk := util.HydrateBeaconBlock(ðpb.BeaconBlock{Slot: 1}) wb, err := consensusblocks.NewBeaconBlock(blk) @@ -1386,22 +1181,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) { config.BellatrixForkEpoch = 2 params.OverrideBeaconConfig(config) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus} - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithAttestationPool(attestations.NewPool()), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - WithStateNotifier(&mock.MockStateNotifier{}), - WithExecutionEngineCaller(mockEngine), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine)) + ctx := tr.ctx st, keys := util.DeterministicGenesisState(t, 64) stateRoot, err := st.HashTreeRoot(ctx) @@ -1546,22 +1328,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { config.BellatrixForkEpoch = 2 params.OverrideBeaconConfig(config) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus} - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithAttestationPool(attestations.NewPool()), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - WithStateNotifier(&mock.MockStateNotifier{}), - WithExecutionEngineCaller(mockEngine), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine)) + ctx := tr.ctx st, keys := util.DeterministicGenesisState(t, 64) stateRoot, err := st.HashTreeRoot(ctx) @@ -1707,22 +1476,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { config.BellatrixForkEpoch = 2 params.OverrideBeaconConfig(config) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus} - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithAttestationPool(attestations.NewPool()), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - WithStateNotifier(&mock.MockStateNotifier{}), - WithExecutionEngineCaller(mockEngine), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine)) + ctx := tr.ctx st, keys := util.DeterministicGenesisState(t, 64) stateRoot, err := st.HashTreeRoot(ctx) @@ -1915,27 +1671,9 @@ func TestNoViableHead_Reboot(t *testing.T) { config.BellatrixForkEpoch = 2 params.OverrideBeaconConfig(config) - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus} - attSrv, err := attestations.NewService(ctx, &attestations.Config{}) - require.NoError(t, err) - newfc := doublylinkedtree.New() - newStateGen := stategen.New(beaconDB, newfc) - newfc.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot) - opts := []Option{ - WithDatabase(beaconDB), - WithAttestationPool(attestations.NewPool()), - WithStateGen(newStateGen), - WithForkChoiceStore(newfc), - WithStateNotifier(&mock.MockStateNotifier{}), - 
WithExecutionEngineCaller(mockEngine), - WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), - WithAttestationService(attSrv), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine)) + ctx := tr.ctx genesisState, keys := util.DeterministicGenesisState(t, 64) stateRoot, err := genesisState.HashTreeRoot(ctx) @@ -2084,18 +1822,8 @@ func TestNoViableHead_Reboot(t *testing.T) { } func TestOnBlock_HandleBlockAttestations(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithAttestationPool(attestations.NewPool()), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - WithStateNotifier(&mock.MockStateNotifier{}), - } - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx st, keys := util.DeterministicGenesisState(t, 64) stateRoot, err := st.HashTreeRoot(ctx) @@ -2155,18 +1883,8 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) { func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) { logHook := logTest.NewGlobal() - fc := doublylinkedtree.New() - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithForkChoiceStore(fc), - WithStateGen(stategen.New(beaconDB, fc)), - } - - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) - service.lateBlockTasks(ctx) + service, tr := minimalTestService(t) + service.lateBlockTasks(tr.ctx) require.LogsDoNotContain(t, logHook, "could not perform late block tasks") } @@ -2177,24 +1895,14 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) { }) defer resetCfg() - fc := doublylinkedtree.New() - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - opts := []Option{ - WithForkChoiceStore(fc), - WithStateGen(stategen.New(beaconDB, fc)), - } - - service, err := NewService(ctx, opts...) - require.NoError(t, err) - service.lateBlockTasks(ctx) + service, tr := minimalTestService(t) + service.lateBlockTasks(tr.ctx) require.LogsDoNotContain(t, logHook, "could not perform late block tasks") } // Helper function to simulate the block being on time or delayed for proposer // boost. It alters the genesisTime tracked by the store. -func driftGenesisTime(s *Service, slot int64, delay int64) { +func driftGenesisTime(s *Service, slot, delay int64) { offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0)) } diff --git a/beacon-chain/blockchain/receive_attestation.go b/beacon-chain/blockchain/receive_attestation.go index 3b4d9742b2..9b8e665132 100644 --- a/beacon-chain/blockchain/receive_attestation.go +++ b/beacon-chain/blockchain/receive_attestation.go @@ -7,8 +7,6 @@ import ( "time" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/async/event" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/config/features" @@ -67,20 +65,13 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat } // This routine processes fork choice attestations from the pool to account for validator votes and fork choice. 
-func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) { - // Wait for state to be initialized. - stateChannel := make(chan *feed.Event, 1) - stateSub := stateFeed.Subscribe(stateChannel) +func (s *Service) spawnProcessAttestationsRoutine() { go func() { - select { - case <-s.ctx.Done(): - stateSub.Unsubscribe() + _, err := s.clockWaiter.WaitForClock(s.ctx) + if err != nil { + log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data") return - case <-stateChannel: - stateSub.Unsubscribe() - break } - if s.genesisTime.IsZero() { log.Warn("ProcessAttestations routine waiting for genesis time") for s.genesisTime.IsZero() { diff --git a/beacon-chain/blockchain/receive_attestation_test.go b/beacon-chain/blockchain/receive_attestation_test.go index 6ab970bb79..4ca446086e 100644 --- a/beacon-chain/blockchain/receive_attestation_test.go +++ b/beacon-chain/blockchain/receive_attestation_test.go @@ -7,11 +7,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" - testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -31,22 +27,18 @@ var ( func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) { helpers.ClearCache() - beaconDB := testDB.SetupDB(t) + service, _ := minimalTestService(t) - chainService := setupBeaconChain(t, beaconDB) - chainService.genesisTime = time.Now() + service.genesisTime = time.Now() e := 
primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1) - _, err := chainService.AttestationTargetState(context.Background(), ðpb.Checkpoint{Epoch: e}) + _, err := service.AttestationTargetState(context.Background(), ðpb.Checkpoint{Epoch: e}) require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err) } func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) { - ctx := context.Background() - opts := testServiceOptsWithDB(t) - - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx b32 := util.NewBeaconBlock() b32.Block.Slot = 32 @@ -69,11 +61,8 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) { } func TestVerifyLMDFFGConsistent_OK(t *testing.T) { - ctx := context.Background() - - opts := testServiceOptsWithDB(t) - service, err := NewService(ctx, opts...) - require.NoError(t, err) + service, tr := minimalTestService(t) + ctx := tr.ctx b32 := util.NewBeaconBlock() b32.Block.Slot = 32 @@ -96,13 +85,10 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) { } func TestProcessAttestations_Ok(t *testing.T) { + service, tr := minimalTestService(t) hook := logTest.NewGlobal() - ctx := context.Background() - opts := testServiceOptsWithDB(t) - opts = append(opts, WithAttestationPool(attestations.NewPool())) + ctx := tr.ctx - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) genesisState, pks := util.DeterministicGenesisState(t, 64) require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot)) @@ -126,21 +112,9 @@ func TestProcessAttestations_Ok(t *testing.T) { } func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - newStateGen := stategen.New(beaconDB, fcs) - fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot) - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(newStateGen), - WithAttestationPool(attestations.NewPool()), - WithStateNotifier(&mockBeaconNode{}), - WithForkChoiceStore(fcs), - } + service, tr := minimalTestService(t) + ctx, fcs := tr.ctx, tr.fcs - service, err := NewService(ctx, opts...) - require.NoError(t, err) service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) genesisState, pks := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, genesisState)) @@ -189,21 +163,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) { } func TestService_UpdateHead_NoAtts(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fcs := doublylinkedtree.New() - newStateGen := stategen.New(beaconDB, fcs) - fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot) - opts := []Option{ - WithDatabase(beaconDB), - WithAttestationPool(attestations.NewPool()), - WithStateNotifier(&mockBeaconNode{}), - WithStateGen(newStateGen), - WithForkChoiceStore(fcs), - } + service, tr := minimalTestService(t) + ctx, fcs := tr.ctx, tr.fcs - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) genesisState, pks := util.DeterministicGenesisState(t, 64) require.NoError(t, service.saveGenesisData(ctx, genesisState)) diff --git a/beacon-chain/blockchain/receive_block_test.go b/beacon-chain/blockchain/receive_block_test.go index 53a2f2e7f6..39f4e9301a 100644 --- a/beacon-chain/blockchain/receive_block_test.go +++ b/beacon-chain/blockchain/receive_block_test.go @@ -7,12 +7,7 @@ import ( "time" blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" - testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" - doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" @@ -125,22 +120,15 @@ func TestService_ReceiveBlock(t *testing.T) { for _, tt := range tests { wg.Add(1) t.Run(tt.name, func(t *testing.T) { - beaconDB := testDB.SetupDB(t) + s, tr := minimalTestService(t, + WithFinalizedStateAtStartUp(genesis), + WithExitPool(voluntaryexits.NewPool()), + WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true})) + + beaconDB := tr.db genesisBlockRoot := bytesutil.ToBytes32(nil) require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot)) - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fc), - WithAttestationPool(attestations.NewPool()), - WithExitPool(voluntaryexits.NewPool()), - 
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}), - WithStateGen(stategen.New(beaconDB, fc)), - WithFinalizedStateAtStartUp(genesis), - } - s, err := NewService(ctx, opts...) - require.NoError(t, err) // Initialize it here. _ = s.cfg.StateNotifier.StateFeed() require.NoError(t, s.saveGenesisData(ctx, genesis)) @@ -162,25 +150,16 @@ func TestService_ReceiveBlock(t *testing.T) { } func TestService_ReceiveBlockUpdateHead(t *testing.T) { - ctx := context.Background() + s, tr := minimalTestService(t, + WithExitPool(voluntaryexits.NewPool()), + WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true})) + ctx, beaconDB := tr.ctx, tr.db genesis, keys := util.DeterministicGenesisState(t, 64) b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1) assert.NoError(t, err) - beaconDB := testDB.SetupDB(t) genesisBlockRoot := bytesutil.ToBytes32(nil) require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot)) - fc := doublylinkedtree.New() - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fc), - WithAttestationPool(attestations.NewPool()), - WithExitPool(voluntaryexits.NewPool()), - WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}), - WithStateGen(stategen.New(beaconDB, fc)), - } - s, err := NewService(ctx, opts...) - require.NoError(t, err) // Initialize it here. _ = s.cfg.StateNotifier.StateFeed() require.NoError(t, s.saveGenesisData(ctx, genesis)) @@ -246,17 +225,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fc := doublylinkedtree.New() - beaconDB := testDB.SetupDB(t) - opts := []Option{ - WithDatabase(beaconDB), - WithForkChoiceStore(fc), - WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}), - WithStateGen(stategen.New(beaconDB, fc)), - } - s, err := NewService(ctx, opts...) 
- require.NoError(t, err) - err = s.saveGenesisData(ctx, genesis) + s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true})) + err := s.saveGenesisData(ctx, genesis) require.NoError(t, err) root, err := tt.args.block.Block.HashTreeRoot() require.NoError(t, err) @@ -276,10 +246,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) { } func TestService_HasBlock(t *testing.T) { - opts := testServiceOptsWithDB(t) - opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{})) - s, err := NewService(context.Background(), opts...) - require.NoError(t, err) + s, _ := minimalTestService(t) r := [32]byte{'a'} if s.HasBlock(context.Background(), r) { t.Error("Should not have block") @@ -299,10 +266,8 @@ func TestService_HasBlock(t *testing.T) { } func TestCheckSaveHotStateDB_Enabling(t *testing.T) { - opts := testServiceOptsWithDB(t) hook := logTest.NewGlobal() - s, err := NewService(context.Background(), opts...) - require.NoError(t, err) + s, _ := minimalTestService(t) st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB)) s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) @@ -312,9 +277,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) { func TestCheckSaveHotStateDB_Disabling(t *testing.T) { hook := logTest.NewGlobal() - opts := testServiceOptsWithDB(t) - s, err := NewService(context.Background(), opts...) 
- require.NoError(t, err) + + s, _ := minimalTestService(t) + st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB)) s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) require.NoError(t, s.checkSaveHotStateDB(context.Background())) @@ -326,9 +291,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) { func TestCheckSaveHotStateDB_Overflow(t *testing.T) { hook := logTest.NewGlobal() - opts := testServiceOptsWithDB(t) - s, err := NewService(context.Background(), opts...) - require.NoError(t, err) + s, _ := minimalTestService(t) s.genesisTime = time.Now() require.NoError(t, s.checkSaveHotStateDB(context.Background())) @@ -336,19 +299,8 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) { } func TestHandleBlockBLSToExecutionChanges(t *testing.T) { - ctx := context.Background() - beaconDB := testDB.SetupDB(t) - fc := doublylinkedtree.New() - pool := blstoexec.NewPool() - opts := []Option{ - WithDatabase(beaconDB), - WithStateGen(stategen.New(beaconDB, fc)), - WithForkChoiceStore(fc), - WithStateNotifier(&blockchainTesting.MockStateNotifier{}), - WithBLSToExecPool(pool), - } - service, err := NewService(ctx, opts...) 
- require.NoError(t, err) + service, tr := minimalTestService(t) + pool := tr.blsPool t.Run("pre Capella block", func(t *testing.T) { body := ðpb.BeaconBlockBodyBellatrix{} diff --git a/beacon-chain/blockchain/service.go b/beacon-chain/blockchain/service.go index ab62c1aa0b..5a61c69a43 100644 --- a/beacon-chain/blockchain/service.go +++ b/beacon-chain/blockchain/service.go @@ -27,6 +27,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/features" @@ -57,6 +58,8 @@ type Service struct { initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock initSyncBlocksLock sync.RWMutex wsVerifier *WeakSubjectivityVerifier + clockSetter startup.ClockSetter + clockWaiter startup.ClockWaiter } // config options for the service. @@ -83,6 +86,8 @@ type config struct { ExecutionEngineCaller execution.EngineCaller } +var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter") + // NewService instantiates a new block service instance that will // be registered into a running beacon node. 
func NewService(ctx context.Context, opts ...Option) (*Service, error) { @@ -100,6 +105,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) { return nil, err } } + if srv.clockSetter == nil { + return nil, ErrMissingClockSetter + } var err error srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB) if err != nil { @@ -121,8 +129,8 @@ func (s *Service) Start() { log.Fatal(err) } } - s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed()) - s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed()) + s.spawnProcessAttestationsRoutine() + s.spawnLateBlockTasksLoop() } // Stop the blockchain service's main event loop and associated goroutines. @@ -236,13 +244,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error { return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync") } - s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: s.genesisTime, - GenesisValidatorsRoot: saved.GenesisValidatorsRoot(), - }, - }) + vr := bytesutil.ToBytes32(saved.GenesisValidatorsRoot()) + if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil { + return errors.Wrap(err, "failed to initialize blockchain service") + } return nil } @@ -359,15 +364,10 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti } go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot) - // We send out a state initialized event to the rest of the services - // running in the beacon node. 
- s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: genesisTime, - GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(), - }, - }) + vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot()) + if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil { + log.WithError(err).Fatal("failed to initialize blockchain service from execution start event") + } } // initializes the state and genesis block of the beacon chain to persistent storage diff --git a/beacon-chain/blockchain/service_test.go b/beacon-chain/blockchain/service_test.go index 4ca92e73d4..b53147b7e7 100644 --- a/beacon-chain/blockchain/service_test.go +++ b/beacon-chain/blockchain/service_test.go @@ -8,13 +8,9 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/prysmaticlabs/prysm/v4/async/event" - mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" @@ -25,7 +21,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" 
"github.com/prysmaticlabs/prysm/v4/config/features" @@ -40,45 +36,8 @@ import ( "github.com/prysmaticlabs/prysm/v4/testing/util" "github.com/prysmaticlabs/prysm/v4/time/slots" logTest "github.com/sirupsen/logrus/hooks/test" - "google.golang.org/protobuf/proto" ) -type mockBeaconNode struct { - stateFeed *event.Feed -} - -// StateFeed mocks the same method in the beacon node. -func (mbn *mockBeaconNode) StateFeed() *event.Feed { - if mbn.stateFeed == nil { - mbn.stateFeed = new(event.Feed) - } - return mbn.stateFeed -} - -type mockBroadcaster struct { - broadcastCalled bool -} - -func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error { - mb.broadcastCalled = true - return nil -} - -func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error { - mb.broadcastCalled = true - return nil -} - -func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error { - mb.broadcastCalled = true - return nil -} - -func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) { -} - -var _ p2p.Broadcaster = (*mockBroadcaster)(nil) - func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service { ctx := context.Background() var web3Service *execution.Service @@ -141,6 +100,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service { WithAttestationService(attService), WithStateGen(stateGen), WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), + WithClockSynchronizer(startup.NewClockSynchronizer()), } chainService, err := NewService(ctx, opts...) 
@@ -157,12 +117,14 @@ func TestChainStartStop_Initialized(t *testing.T) { chainService := setupBeaconChain(t, beaconDB) + gt := time.Unix(23, 0) genesisBlk := util.NewBeaconBlock() blkRoot, err := genesisBlk.Block.HashTreeRoot() require.NoError(t, err) util.SaveBlock(t, ctx, beaconDB, genesisBlk) s, err := util.NewBeaconState() require.NoError(t, err) + require.NoError(t, s.SetGenesisTime(uint64(gt.Unix()))) require.NoError(t, s.SetSlot(1)) require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot)) require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot)) @@ -192,12 +154,14 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) { chainService := setupBeaconChain(t, beaconDB) + gt := time.Unix(23, 0) genesisBlk := util.NewBeaconBlock() blkRoot, err := genesisBlk.Block.HashTreeRoot() require.NoError(t, err) wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk) s, err := util.NewBeaconState() require.NoError(t, err) + require.NoError(t, s.SetGenesisTime(uint64(gt.Unix()))) require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot)) require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot)) require.NoError(t, beaconDB.SaveBlock(ctx, wsb)) @@ -264,12 +228,14 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) { chainService := setupBeaconChain(t, beaconDB) + gt := time.Unix(23, 0) genesisBlk := util.NewBeaconBlock() blkRoot, err := genesisBlk.Block.HashTreeRoot() require.NoError(t, err) util.SaveBlock(t, ctx, beaconDB, genesisBlk) s, err := util.NewBeaconState() require.NoError(t, err) + require.NoError(t, s.SetGenesisTime(uint64(gt.Unix()))) require.NoError(t, s.SetSlot(0)) require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot)) require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot)) @@ -290,14 +256,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) { } func TestChainService_InitializeChainInfo(t *testing.T) { - beaconDB := testDB.SetupDB(t) - ctx := context.Background() - genesis := util.NewBeaconBlock() genesisRoot, err := 
genesis.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot)) - util.SaveBlock(t, ctx, beaconDB, genesis) finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1 headBlock := util.NewBeaconBlock() @@ -309,23 +270,18 @@ func TestChainService_InitializeChainInfo(t *testing.T) { require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:])) headRoot, err := headBlock.Block.HashTreeRoot() require.NoError(t, err) + + c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState)) + ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg + + require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot)) + util.SaveBlock(t, ctx, beaconDB, genesis) require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot)) require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot)) util.SaveBlock(t, ctx, beaconDB, headBlock) require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]})) - attSrv, err := attestations.NewService(ctx, &attestations.Config{}) - require.NoError(t, err) - fc := doublylinkedtree.New() - stateGen := stategen.New(beaconDB, fc) - c, err := NewService(ctx, - WithForkChoiceStore(fc), - WithDatabase(beaconDB), - WithStateGen(stateGen), - WithAttestationService(attSrv), - WithStateNotifier(&mock.MockStateNotifier{}), - WithFinalizedStateAtStartUp(headState)) - require.NoError(t, err) require.NoError(t, stateGen.SaveState(ctx, headRoot, headState)) + require.NoError(t, c.StartFromSavedState(headState)) headBlk, err := c.HeadBlock(ctx) require.NoError(t, err) @@ -345,14 +301,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) { } func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) { - beaconDB := testDB.SetupDB(t) - ctx := context.Background() - genesis := util.NewBeaconBlock() genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, 
beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot)) - util.SaveBlock(t, ctx, beaconDB, genesis) finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1 headBlock := util.NewBeaconBlock() @@ -364,27 +315,21 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) { require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:])) headRoot, err := headBlock.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot)) + + c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState)) + ctx, beaconDB := tr.ctx, tr.db + + util.SaveBlock(t, ctx, beaconDB, genesis) + require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot)) require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot)) + require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot)) util.SaveBlock(t, ctx, beaconDB, headBlock) - attSrv, err := attestations.NewService(ctx, &attestations.Config{}) - require.NoError(t, err) ss := ðpb.StateSummary{ Slot: finalizedSlot, Root: headRoot[:], } require.NoError(t, beaconDB.SaveStateSummary(ctx, ss)) require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)})) - fc := doublylinkedtree.New() - stateGen := stategen.New(beaconDB, fc) - c, err := NewService(ctx, - WithForkChoiceStore(fc), - WithDatabase(beaconDB), - WithStateGen(stateGen), - WithAttestationService(attSrv), - WithStateNotifier(&mock.MockStateNotifier{}), - WithFinalizedStateAtStartUp(headState)) - require.NoError(t, err) require.NoError(t, c.StartFromSavedState(headState)) s, err := c.HeadState(ctx) @@ -460,17 +405,21 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) { } func TestProcessChainStartTime_ReceivedFeed(t *testing.T) { + ctx := context.Background() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) - stateChannel := make(chan *feed.Event, 1) - stateSub := 
service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - service.onExecutionChainStart(context.Background(), time.Now()) - - stateEvent := <-stateChannel - require.Equal(t, int(stateEvent.Type), statefeed.Initialized) - _, ok := stateEvent.Data.(*statefeed.InitializedData) - require.Equal(t, true, ok) + mgs := &MockClockSetter{} + service.clockSetter = mgs + gt := time.Now() + service.onExecutionChainStart(context.Background(), gt) + gs, err := beaconDB.GenesisState(ctx) + require.NoError(t, err) + require.NotEqual(t, nil, gs) + require.Equal(t, 32, len(gs.GenesisValidatorsRoot())) + var zero [32]byte + require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:]) + require.Equal(t, gt, mgs.G.GenesisTime()) + require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot()) } func BenchmarkHasBlockDB(b *testing.B) { @@ -519,15 +468,10 @@ func TestChainService_EverythingOptimistic(t *testing.T) { EnableStartOptimistic: true, }) defer resetFn() - beaconDB := testDB.SetupDB(t) - ctx := context.Background() genesis := util.NewBeaconBlock() genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot)) - util.SaveBlock(t, ctx, beaconDB, genesis) - finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1 headBlock := util.NewBeaconBlock() headBlock.Block.Slot = finalizedSlot @@ -538,21 +482,17 @@ func TestChainService_EverythingOptimistic(t *testing.T) { require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:])) headRoot, err := headBlock.Block.HashTreeRoot() require.NoError(t, err) + + c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState)) + ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg + + require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot)) + util.SaveBlock(t, ctx, beaconDB, genesis) require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot)) 
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot)) util.SaveBlock(t, ctx, beaconDB, headBlock) require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]})) - attSrv, err := attestations.NewService(ctx, &attestations.Config{}) - require.NoError(t, err) - fc := doublylinkedtree.New() - stateGen := stategen.New(beaconDB, fc) - c, err := NewService(ctx, - WithForkChoiceStore(fc), - WithDatabase(beaconDB), - WithStateGen(stateGen), - WithAttestationService(attSrv), - WithStateNotifier(&mock.MockStateNotifier{}), - WithFinalizedStateAtStartUp(headState)) + require.NoError(t, err) require.NoError(t, stateGen.SaveState(ctx, headRoot, headState)) require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]})) @@ -562,3 +502,19 @@ func TestChainService_EverythingOptimistic(t *testing.T) { require.NoError(t, err) require.Equal(t, true, op) } + +// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should +// call SetGenesis. +type MockClockSetter struct { + G *startup.Clock + Err error +} + +var _ startup.ClockSetter = &MockClockSetter{} + +// SetClock satisfies the ClockSetter interface. +// The value is written to an exported field 'G' so that it can be accessed in tests. 
+func (s *MockClockSetter) SetClock(g *startup.Clock) error { + s.G = g + return s.Err +} diff --git a/beacon-chain/blockchain/setup_test.go b/beacon-chain/blockchain/setup_test.go new file mode 100644 index 0000000000..89b4ad04ed --- /dev/null +++ b/beacon-chain/blockchain/setup_test.go @@ -0,0 +1,115 @@ +package blockchain + +import ( + "context" + "testing" + + "github.com/prysmaticlabs/prysm/v4/async/event" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache" + statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" + testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice" + doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" + ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/v4/testing/require" + "google.golang.org/protobuf/proto" +) + +type mockBeaconNode struct { + stateFeed *event.Feed +} + +// StateFeed mocks the same method in the beacon node. 
+func (mbn *mockBeaconNode) StateFeed() *event.Feed { + if mbn.stateFeed == nil { + mbn.stateFeed = new(event.Feed) + } + return mbn.stateFeed +} + +type mockBroadcaster struct { + broadcastCalled bool +} + +func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error { + mb.broadcastCalled = true + return nil +} + +func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error { + mb.broadcastCalled = true + return nil +} + +func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error { + mb.broadcastCalled = true + return nil +} + +func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) { +} + +var _ p2p.Broadcaster = (*mockBroadcaster)(nil) + +type testServiceRequirements struct { + ctx context.Context + db db.Database + fcs forkchoice.ForkChoicer + sg *stategen.State + notif statefeed.Notifier + cs *startup.ClockSynchronizer + attPool attestations.Pool + attSrv *attestations.Service + blsPool *blstoexec.Pool + dc *depositcache.DepositCache +} + +func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) { + ctx := context.Background() + beaconDB := testDB.SetupDB(t) + fcs := doublylinkedtree.New() + sg := stategen.New(beaconDB, fcs) + notif := &mockBeaconNode{} + fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot) + cs := startup.NewClockSynchronizer() + attPool := attestations.NewPool() + attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool}) + require.NoError(t, err) + blsPool := blstoexec.NewPool() + dc, err := depositcache.New() + require.NoError(t, err) + req := &testServiceRequirements{ + ctx: ctx, + db: beaconDB, + fcs: fcs, + sg: sg, + notif: notif, + cs: cs, + attPool: attPool, + attSrv: attSrv, + blsPool: blsPool, + dc: dc, + } + defOpts := []Option{WithDatabase(req.db), + WithStateNotifier(req.notif), + 
WithStateGen(req.sg), + WithForkChoiceStore(req.fcs), + WithClockSynchronizer(req.cs), + WithAttestationPool(req.attPool), + WithAttestationService(req.attSrv), + WithBLSToExecPool(req.blsPool), + WithDepositCache(dc), + } + // append the variadic opts so they override the defaults by being processed afterwards + opts = append(defOpts, opts...) + s, err := NewService(req.ctx, opts...) + + require.NoError(t, err) + return s, req +} diff --git a/beacon-chain/core/feed/state/events.go b/beacon-chain/core/feed/state/events.go index d3ff900e8d..1f0af7ac46 100644 --- a/beacon-chain/core/feed/state/events.go +++ b/beacon-chain/core/feed/state/events.go @@ -15,10 +15,10 @@ const ( BlockProcessed = iota + 1 // ChainStarted is sent when enough validators are active to start proposing blocks. ChainStarted - // Initialized is sent when the internal beacon node's state is ready to be accessed. - Initialized - // Synced is sent when the beacon node has completed syncing and is ready to participate in the network. - Synced + // deprecated: Initialized is sent when the internal beacon node's state is ready to be accessed. + _ + // deprecated: Synced is sent when the beacon node has completed syncing and is ready to participate in the network. + _ // Reorg is an event sent when the new head is not a descendant of the previous head. Reorg // FinalizedCheckpoint event. 
diff --git a/beacon-chain/core/helpers/BUILD.bazel b/beacon-chain/core/helpers/BUILD.bazel index 8d984936b4..8f9ca1439b 100644 --- a/beacon-chain/core/helpers/BUILD.bazel +++ b/beacon-chain/core/helpers/BUILD.bazel @@ -48,6 +48,7 @@ go_test( "attestation_test.go", "beacon_committee_test.go", "block_test.go", + "main_test.go", "randao_test.go", "rewards_penalties_test.go", "shuffle_test.go", diff --git a/beacon-chain/core/helpers/main_test.go b/beacon-chain/core/helpers/main_test.go new file mode 100644 index 0000000000..9232793a45 --- /dev/null +++ b/beacon-chain/core/helpers/main_test.go @@ -0,0 +1,13 @@ +package helpers + +import ( + "os" + "testing" +) + +// run ClearCache before each test to prevent cross-test side effects +func TestMain(m *testing.M) { + ClearCache() + code := m.Run() + os.Exit(code) +} diff --git a/beacon-chain/monitor/process_block_test.go b/beacon-chain/monitor/process_block_test.go index fbf87819b3..b3007e3d7f 100644 --- a/beacon-chain/monitor/process_block_test.go +++ b/beacon-chain/monitor/process_block_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "testing" - "time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -241,10 +240,44 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) { func TestLogAggregatedPerformance(t *testing.T) { hook := logTest.NewGlobal() - s := setupService(t) + latestPerformance := map[primitives.ValidatorIndex]ValidatorLatestPerformance{ + 1: { + balance: 32000000000, + }, + 2: { + balance: 32000000000, + }, + 12: { + balance: 31900000000, + }, + 15: { + balance: 31900000000, + }, + } + aggregatedPerformance := map[primitives.ValidatorIndex]ValidatorAggregatedPerformance{ + 1: { + startEpoch: 0, + startBalance: 31700000000, + totalAttestedCount: 12, + totalRequestedCount: 15, + totalDistance: 14, + totalCorrectHead: 8, + totalCorrectSource: 11, + totalCorrectTarget: 12, + totalProposedCount: 1, + totalSyncCommitteeContributions: 0, + 
totalSyncCommitteeAggregations: 0, + }, + 2: {}, + 12: {}, + 15: {}, + } + s := &Service{ + latestPerformance: latestPerformance, + aggregatedPerformance: aggregatedPerformance, + } s.logAggregatedPerformance() - time.Sleep(3000 * time.Millisecond) wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" + " AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " + "CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " + diff --git a/beacon-chain/monitor/service.go b/beacon-chain/monitor/service.go index 2eebef8063..8825e0efa9 100644 --- a/beacon-chain/monitor/service.go +++ b/beacon-chain/monitor/service.go @@ -19,13 +19,8 @@ import ( "github.com/sirupsen/logrus" ) -var ( - // Error when event feed data is not statefeed.SyncedData. - errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData") - - // Error when the context is closed while waiting for sync. - errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head") -) +// Error when the context is closed while waiting for sync. 
+var errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head") // ValidatorLatestPerformance keeps track of the latest participation of the validator type ValidatorLatestPerformance struct { @@ -63,6 +58,7 @@ type ValidatorMonitorConfig struct { AttestationNotifier operation.Notifier HeadFetcher blockchain.HeadFetcher StateGen stategen.StateManager + InitialSyncComplete chan struct{} } // Service is the main structure that tracks validators and reports logs and @@ -131,7 +127,7 @@ func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription return } - if err := s.waitForSync(stateChannel, stateSub); err != nil { + if err := s.waitForSync(s.config.InitialSyncComplete); err != nil { log.WithError(err) return } @@ -197,24 +193,13 @@ func (s *Service) Stop() error { } // waitForSync waits until the beacon node is synced to the latest head. -func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error { - for { - select { - case e := <-stateChannel: - if e.Type == statefeed.Synced { - _, ok := e.Data.(*statefeed.SyncedData) - if !ok { - return errNotSyncedData - } - return nil - } - case <-s.ctx.Done(): - log.Debug("Context closed, exiting goroutine") - return errContextClosedWhileWaiting - case err := <-stateSub.Err(): - log.WithError(err).Error("Could not subscribe to state notifier") - return err - } +func (s *Service) waitForSync(syncChan chan struct{}) error { + select { + case <-syncChan: + return nil + case <-s.ctx.Done(): + log.Debug("Context closed, exiting goroutine") + return errContextClosedWhileWaiting } } diff --git a/beacon-chain/monitor/service_test.go b/beacon-chain/monitor/service_test.go index f3460223be..e0637a8a96 100644 --- a/beacon-chain/monitor/service_test.go +++ b/beacon-chain/monitor/service_test.go @@ -93,6 +93,7 @@ func setupService(t *testing.T) *Service { StateNotifier: chainService.StateNotifier(), HeadFetcher: chainService, 
AttestationNotifier: chainService.OperationNotifier(), + InitialSyncComplete: make(chan struct{}), }, ctx: context.Background(), @@ -140,34 +141,9 @@ func TestNewService(t *testing.T) { func TestStart(t *testing.T) { hook := logTest.NewGlobal() s := setupService(t) - stateChannel := make(chan *feed.Event, 1) - stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - wg := &sync.WaitGroup{} - wg.Add(1) s.Start() - - go func() { - select { - case stateEvent := <-stateChannel: - if stateEvent.Type == statefeed.Synced { - _, ok := stateEvent.Data.(*statefeed.SyncedData) - require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData") - } - case <-s.ctx.Done(): - } - wg.Done() - }() - - for sent := 0; sent == 0; { - sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Synced, - Data: &statefeed.SyncedData{ - StartTime: time.Now(), - }, - }) - } + close(s.config.InitialSyncComplete) // wait for Logrus time.Sleep(1000 * time.Millisecond) @@ -267,26 +243,29 @@ func TestMonitorRoutine(t *testing.T) { } func TestWaitForSync(t *testing.T) { - s := setupService(t) - stateChannel := make(chan *feed.Event, 1) - stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - - wg := &sync.WaitGroup{} - wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + s := &Service{ctx: ctx} + syncChan := make(chan struct{}) go func() { - err := s.waitForSync(stateChannel, stateSub) - require.NoError(t, err) - wg.Done() + // Failsafe to make sure tests never get deadlocked; we should always go through the happy path before 500ms. + // Otherwise, the NoError assertion below will fail. 
+ time.Sleep(500 * time.Millisecond) + cancel() }() + go func() { + close(syncChan) + }() + require.NoError(t, s.waitForSync(syncChan)) +} - stateChannel <- &feed.Event{ - Type: statefeed.Synced, - Data: &statefeed.SyncedData{ - StartTime: time.Now(), - }, - } +func TestWaitForSyncCanceled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + s := &Service{ctx: ctx} + syncChan := make(chan struct{}) + + cancel() + require.ErrorIs(t, s.waitForSync(syncChan), errContextClosedWhileWaiting) } func TestRun(t *testing.T) { @@ -295,21 +274,11 @@ func TestRun(t *testing.T) { stateChannel := make(chan *feed.Event, 1) stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { s.run(stateChannel, stateSub) - wg.Done() }() + close(s.config.InitialSyncComplete) - stateChannel <- &feed.Event{ - Type: statefeed.Synced, - Data: &statefeed.SyncedData{ - StartTime: time.Now(), - }, - } - //wait for Logrus - time.Sleep(1000 * time.Millisecond) + time.Sleep(100 * time.Millisecond) require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance") } diff --git a/beacon-chain/node/BUILD.bazel b/beacon-chain/node/BUILD.bazel index 4e55b377ef..102f9a602e 100644 --- a/beacon-chain/node/BUILD.bazel +++ b/beacon-chain/node/BUILD.bazel @@ -40,6 +40,7 @@ go_library( "//beacon-chain/rpc:go_default_library", "//beacon-chain/rpc/apimiddleware:go_default_library", "//beacon-chain/slasher:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync:go_default_library", diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index 39dd04c78f..057b279267 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -42,6 +42,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware" "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" regularsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" @@ -107,6 +108,8 @@ type BeaconNode struct { GenesisInitializer genesis.Initializer CheckpointInitializer checkpoint.Initializer forkChoicer forkchoice.ForkChoicer + clockWaiter startup.ClockWaiter + initialSyncComplete chan struct{} } // New creates a new node instance, sets up configuration options, and registers @@ -177,12 +180,16 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) { proposerIdsCache: cache.NewProposerPayloadIDsCache(), } + beacon.initialSyncComplete = make(chan struct{}) for _, opt := range opts { if err := opt(beacon); err != nil { return nil, err } } + synchronizer := startup.NewClockSynchronizer() + beacon.clockWaiter = synchronizer + beacon.forkChoicer = doublylinkedtree.New() depositAddress, err := execution.DepositContractAddress() if err != nil { @@ -229,17 +236,17 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) { } log.Debugln("Registering Blockchain Service") - if err := beacon.registerBlockchainService(beacon.forkChoicer); err != nil { + if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer); err != nil { return nil, err } log.Debugln("Registering Initial Sync Service") - if err := beacon.registerInitialSyncService(); err != nil { + if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil { return nil, err } log.Debugln("Registering Sync Service") - if err := beacon.registerSyncService(); err != nil { + if err := beacon.registerSyncService(beacon.initialSyncComplete); err != nil { return nil, err } @@ -265,7 +272,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) { 
} log.Debugln("Registering Validator Monitoring Service") - if err := beacon.registerValidatorMonitorService(); err != nil { + if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil { return nil, err } @@ -548,6 +555,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error { EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name), StateNotifier: b, DB: b.db, + ClockWaiter: b.clockWaiter, }) if err != nil { return err @@ -581,7 +589,7 @@ func (b *BeaconNode) registerAttestationPool() error { return b.services.RegisterService(s) } -func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer) error { +func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer) error { var web3Service *execution.Service if err := b.services.FetchService(&web3Service); err != nil { return err @@ -611,6 +619,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer) error blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed), blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp), blockchain.WithProposerIdsCache(b.proposerIdsCache), + blockchain.WithClockSynchronizer(gs), ) blockchainService, err := blockchain.NewService(b.ctx, opts...) 
@@ -652,7 +661,7 @@ func (b *BeaconNode) registerPOWChainService() error { return b.services.RegisterService(web3Service) } -func (b *BeaconNode) registerSyncService() error { +func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) error { var web3Service *execution.Service if err := b.services.FetchService(&web3Service); err != nil { return err @@ -674,7 +683,6 @@ func (b *BeaconNode) registerSyncService() error { regularsync.WithP2P(b.fetchP2P()), regularsync.WithChainService(chainService), regularsync.WithInitialSync(initSync), - regularsync.WithStateNotifier(b), regularsync.WithBlockNotifier(b), regularsync.WithAttestationNotifier(b), regularsync.WithOperationNotifier(b), @@ -687,22 +695,26 @@ func (b *BeaconNode) registerSyncService() error { regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed), regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed), regularsync.WithExecutionPayloadReconstructor(web3Service), + regularsync.WithClockWaiter(b.clockWaiter), + regularsync.WithInitialSyncComplete(initialSyncComplete), ) return b.services.RegisterService(rs) } -func (b *BeaconNode) registerInitialSyncService() error { +func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error { var chainService *blockchain.Service if err := b.services.FetchService(&chainService); err != nil { return err } is := initialsync.NewService(b.ctx, &initialsync.Config{ - DB: b.db, - Chain: chainService, - P2P: b.fetchP2P(), - StateNotifier: b, - BlockNotifier: b, + DB: b.db, + Chain: chainService, + P2P: b.fetchP2P(), + StateNotifier: b, + BlockNotifier: b, + ClockWaiter: b.clockWaiter, + InitialSyncComplete: complete, }) return b.services.RegisterService(is) } @@ -834,6 +846,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error { ProposerIdsCache: b.proposerIdsCache, BlockBuilder: b.fetchBuilderService(), Router: router, + ClockWaiter: b.clockWaiter, }) return b.services.RegisterService(rpcService) @@ 
-934,7 +947,7 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error { return nil } -func (b *BeaconNode) registerValidatorMonitorService() error { +func (b *BeaconNode) registerValidatorMonitorService(initialSyncComplete chan struct{}) error { cliSlice := b.cliCtx.IntSlice(cmd.ValidatorMonitorIndicesFlag.Name) if cliSlice == nil { return nil @@ -953,6 +966,7 @@ func (b *BeaconNode) registerValidatorMonitorService() error { AttestationNotifier: b, StateGen: b.stateGen, HeadFetcher: chainService, + InitialSyncComplete: initialSyncComplete, } svc, err := monitor.NewService(b.ctx, monitorConfig, tracked) if err != nil { diff --git a/beacon-chain/node/node_test.go b/beacon-chain/node/node_test.go index 35ab01bdbb..c3f6baa316 100644 --- a/beacon-chain/node/node_test.go +++ b/beacon-chain/node/node_test.go @@ -164,7 +164,7 @@ func TestMonitor_RegisteredCorrectly(t *testing.T) { require.NoError(t, cliCtx.Set(cmd.ValidatorMonitorIndicesFlag.Name, "1,2")) n := &BeaconNode{ctx: context.Background(), cliCtx: cliCtx, services: runtime.NewServiceRegistry()} require.NoError(t, n.services.RegisterService(&blockchain.Service{})) - require.NoError(t, n.registerValidatorMonitorService()) + require.NoError(t, n.registerValidatorMonitorService(make(chan struct{}))) var mService *monitor.Service require.NoError(t, n.services.FetchService(&mService)) diff --git a/beacon-chain/p2p/BUILD.bazel b/beacon-chain/p2p/BUILD.bazel index 4058bf3ccf..9b00505bd7 100644 --- a/beacon-chain/p2p/BUILD.bazel +++ b/beacon-chain/p2p/BUILD.bazel @@ -44,7 +44,6 @@ go_library( "//async:go_default_library", "//beacon-chain/cache:go_default_library", "//beacon-chain/core/altair:go_default_library", - "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/time:go_default_library", @@ -54,6 +53,7 @@ go_library( "//beacon-chain/p2p/peers/peerdata:go_default_library", 
"//beacon-chain/p2p/peers/scorers:go_default_library", "//beacon-chain/p2p/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//cmd/beacon-chain/flags:go_default_library", "//config/params:go_default_library", "//consensus-types/primitives:go_default_library", @@ -132,11 +132,8 @@ go_test( flaky = True, tags = ["requires-network"], deps = [ - "//async/event:go_default_library", "//beacon-chain/blockchain/testing:go_default_library", "//beacon-chain/cache:go_default_library", - "//beacon-chain/core/feed:go_default_library", - "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/signing:go_default_library", "//beacon-chain/db/testing:go_default_library", @@ -146,6 +143,7 @@ go_test( "//beacon-chain/p2p/peers/scorers:go_default_library", "//beacon-chain/p2p/testing:go_default_library", "//beacon-chain/p2p/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//cmd/beacon-chain/flags:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", diff --git a/beacon-chain/p2p/config.go b/beacon-chain/p2p/config.go index 8c12a2ee50..bb18dd8da6 100644 --- a/beacon-chain/p2p/config.go +++ b/beacon-chain/p2p/config.go @@ -3,6 +3,7 @@ package p2p import ( statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" ) // Config for the p2p service. 
These parameters are set from application level flags @@ -28,4 +29,5 @@ type Config struct { DenyListCIDR []string StateNotifier statefeed.Notifier DB db.ReadOnlyDatabase + ClockWaiter startup.ClockWaiter } diff --git a/beacon-chain/p2p/discovery_test.go b/beacon-chain/p2p/discovery_test.go index 598f5d86d5..9a2701884b 100644 --- a/beacon-chain/p2p/discovery_test.go +++ b/beacon-chain/p2p/discovery_test.go @@ -22,12 +22,11 @@ import ( "github.com/prysmaticlabs/go-bitfield" mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers" testp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper" leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket" @@ -169,8 +168,10 @@ func TestMultiAddrConversion_OK(t *testing.T) { } func TestStaticPeering_PeersAreAdded(t *testing.T) { + cs := startup.NewClockSynchronizer() cfg := &Config{ - MaxPeers: 30, + MaxPeers: 30, + ClockWaiter: cs, } port := 6000 var staticPeers []string @@ -204,16 +205,8 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) { <-exitRoutine }() time.Sleep(50 * time.Millisecond) - // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
- for sent := 0; sent == 0; { - sent = s.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) - } + var vr [32]byte + require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr))) time.Sleep(4 * time.Second) ps := s.host.Network().Peers() assert.Equal(t, 5, len(ps), "Not all peers added to peerstore") diff --git a/beacon-chain/p2p/fork_test.go b/beacon-chain/p2p/fork_test.go index f628c6b62a..056d716371 100644 --- a/beacon-chain/p2p/fork_test.go +++ b/beacon-chain/p2p/fork_test.go @@ -125,7 +125,6 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) { cfg: &Config{UDPPort: uint(port)}, genesisTime: genesisTime, genesisValidatorsRoot: genesisValidatorsRoot, - stateNotifier: &mock.MockStateNotifier{}, } bootListener, err := s.createListener(ipAddr, pkey) require.NoError(t, err) @@ -155,7 +154,6 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) { cfg: cfg, genesisTime: genesisTime, genesisValidatorsRoot: genesisValidatorsRoot, - stateNotifier: &mock.MockStateNotifier{}, } listener, err := s.startDiscoveryV5(ipAddr, pkey) assert.NoError(t, err, "Could not start discovery for node") diff --git a/beacon-chain/p2p/pubsub_filter_test.go b/beacon-chain/p2p/pubsub_filter_test.go index b5eea44730..aea8303b3b 100644 --- a/beacon-chain/p2p/pubsub_filter_test.go +++ b/beacon-chain/p2p/pubsub_filter_test.go @@ -9,10 +9,8 @@ import ( pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" - mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" 
"github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" "github.com/prysmaticlabs/prysm/v4/network/forks" @@ -337,28 +335,16 @@ func TestService_MonitorsStateForkUpdates(t *testing.T) { params.SetupTestConfigCleanup(t) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() - notifier := &mock.MockStateNotifier{} - s, err := NewService(ctx, &Config{ - StateNotifier: notifier, - }) + cs := startup.NewClockSynchronizer() + s, err := NewService(ctx, &Config{ClockWaiter: cs}) require.NoError(t, err) require.Equal(t, false, s.isInitialized()) go s.awaitStateInitialized() - for n := 0; n == 0; { - if ctx.Err() != nil { - t.Fatal(ctx.Err()) - } - n = notifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: prysmTime.Now(), - GenesisValidatorsRoot: bytesutil.PadTo([]byte("genesis"), 32), - }, - }) - } + vr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis"), 32)) + require.NoError(t, cs.SetClock(startup.NewClock(prysmTime.Now(), vr))) time.Sleep(50 * time.Millisecond) diff --git a/beacon-chain/p2p/pubsub_test.go b/beacon-chain/p2p/pubsub_test.go index 0c0a3a32d9..f41b64c9c2 100644 --- a/beacon-chain/p2p/pubsub_test.go +++ b/beacon-chain/p2p/pubsub_test.go @@ -11,20 +11,23 @@ import ( mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" testp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/testing/assert" "github.com/prysmaticlabs/prysm/v4/testing/require" ) func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) { + cs := startup.NewClockSynchronizer() s, err := NewService(context.Background(), &Config{ StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: cs, }) require.NoError(t, err) ctx, cancel := 
context.WithTimeout(context.Background(), 3*time.Second) defer cancel() go s.awaitStateInitialized() - fd := initializeStateWithForkDigest(ctx, t, s.stateNotifier.StateFeed()) + fd := initializeStateWithForkDigest(ctx, t, cs) if !s.isInitialized() { t.Fatal("service was not initialized") diff --git a/beacon-chain/p2p/service.go b/beacon-chain/p2p/service.go index 63e61f782a..02953b1e25 100644 --- a/beacon-chain/p2p/service.go +++ b/beacon-chain/p2p/service.go @@ -20,8 +20,6 @@ import ( "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/async" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers" @@ -77,7 +75,6 @@ type Service struct { initializationLock sync.Mutex dv5Listener Listener startupErr error - stateNotifier statefeed.Notifier ctx context.Context host host.Host genesisTime time.Time @@ -93,13 +90,12 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) { _ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop(). 
s := &Service{ - ctx: ctx, - stateNotifier: cfg.StateNotifier, - cancel: cancel, - cfg: cfg, - isPreGenesis: true, - joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)), - subnetsLock: make(map[uint64]*sync.RWMutex), + ctx: ctx, + cancel: cancel, + cfg: cfg, + isPreGenesis: true, + joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)), + subnetsLock: make(map[uint64]*sync.RWMutex), } dv5Nodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr) @@ -383,38 +379,19 @@ func (s *Service) pingPeers() { func (s *Service) awaitStateInitialized() { s.initializationLock.Lock() defer s.initializationLock.Unlock() - if s.isInitialized() { return } - - stateChannel := make(chan *feed.Event, 1) - stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel) - cleanup := stateSub.Unsubscribe - defer cleanup() - for { - select { - case event := <-stateChannel: - if event.Type == statefeed.Initialized { - data, ok := event.Data.(*statefeed.InitializedData) - if !ok { - // log.Fatalf will prevent defer from being called - cleanup() - log.Fatalf("Received wrong data over state initialized feed: %v", data) - } - s.genesisTime = data.StartTime - s.genesisValidatorsRoot = data.GenesisValidatorsRoot - _, err := s.currentForkDigest() // initialize fork digest cache - if err != nil { - log.WithError(err).Error("Could not initialize fork digest") - } - - return - } - case <-s.ctx.Done(): - log.Debug("Context closed, exiting goroutine") - return - } + clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx) + if err != nil { + log.WithError(err).Fatal("failed to receive initial genesis data") + } + s.genesisTime = clock.GenesisTime() + gvr := clock.GenesisValidatorsRoot() + s.genesisValidatorsRoot = gvr[:] + _, err = s.currentForkDigest() // initialize fork digest cache + if err != nil { + log.WithError(err).Error("Could not initialize fork digest") } } diff --git a/beacon-chain/p2p/service_test.go b/beacon-chain/p2p/service_test.go index 00d2d7c339..4a333c6dea 
100644 --- a/beacon-chain/p2p/service_test.go +++ b/beacon-chain/p2p/service_test.go @@ -15,13 +15,11 @@ import ( "github.com/libp2p/go-libp2p/core/peer" noise "github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/multiformats/go-multiaddr" - "github.com/prysmaticlabs/prysm/v4/async/event" mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" "github.com/prysmaticlabs/prysm/v4/network/forks" @@ -102,30 +100,22 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) { params.SetupTestConfigCleanup(t) hook := logTest.NewGlobal() + cs := startup.NewClockSynchronizer() cfg := &Config{ - TCPPort: 2000, - UDPPort: 2000, - StateNotifier: &mock.MockStateNotifier{}, + TCPPort: 2000, + UDPPort: 2000, + ClockWaiter: cs, } s, err := NewService(context.Background(), cfg) require.NoError(t, err) - s.stateNotifier = &mock.MockStateNotifier{} s.dv5Listener = &mockListener{} exitRoutine := make(chan bool) go func() { s.Start() <-exitRoutine }() - // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
- for sent := 0; sent == 0; { - sent = s.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) - } + var vr [32]byte + require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr))) time.Sleep(time.Second * 2) assert.Equal(t, true, s.started, "Expected service to be started") s.Start() @@ -155,17 +145,17 @@ func TestService_Status_NoGenesisTimeSet(t *testing.T) { func TestService_Start_NoDiscoverFlag(t *testing.T) { params.SetupTestConfigCleanup(t) + cs := startup.NewClockSynchronizer() cfg := &Config{ TCPPort: 2000, UDPPort: 2000, StateNotifier: &mock.MockStateNotifier{}, NoDiscovery: true, // <-- no s.dv5Listener is created + ClockWaiter: cs, } s, err := NewService(context.Background(), cfg) require.NoError(t, err) - s.stateNotifier = &mock.MockStateNotifier{} - // required params to addForkEntry in s.forkWatcher s.genesisTime = time.Now() beaconCfg := params.BeaconConfig().Copy() @@ -181,16 +171,8 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) { <-exitRoutine }() - // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
- for sent := 0; sent == 0; { - sent = s.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) - } + var vr [32]byte + require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr))) time.Sleep(time.Second * 2) @@ -207,11 +189,11 @@ func TestListenForNewNodes(t *testing.T) { _, pkey := createAddrAndPrivKey(t) ipAddr := net.ParseIP("127.0.0.1") genesisTime := prysmTime.Now() - genesisValidatorsRoot := make([]byte, 32) + var gvr [32]byte s := &Service{ cfg: cfg, genesisTime: genesisTime, - genesisValidatorsRoot: genesisValidatorsRoot, + genesisValidatorsRoot: gvr[:], } bootListener, err := s.createListener(ipAddr, pkey) require.NoError(t, err) @@ -229,11 +211,12 @@ func TestListenForNewNodes(t *testing.T) { var listeners []*discover.UDPv5 var hosts []host.Host // setup other nodes. + cs := startup.NewClockSynchronizer() cfg = &Config{ BootstrapNodeAddr: []string{bootNode.String()}, Discv5BootStrapAddr: []string{bootNode.String()}, MaxPeers: 30, - StateNotifier: notifier, + ClockWaiter: cs, } for i := 1; i <= 5; i++ { h, pkey, ipAddr := createHost(t, port+i) @@ -242,7 +225,7 @@ func TestListenForNewNodes(t *testing.T) { s := &Service{ cfg: cfg, genesisTime: genesisTime, - genesisValidatorsRoot: genesisValidatorsRoot, + genesisValidatorsRoot: gvr[:], } listener, err := s.startDiscoveryV5(ipAddr, pkey) assert.NoError(t, err, "Could not start discovery for node") @@ -276,16 +259,9 @@ func TestListenForNewNodes(t *testing.T) { <-exitRoutine }() time.Sleep(1 * time.Second) - // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
- for sent := 0; sent == 0; { - sent = s.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: genesisTime, - GenesisValidatorsRoot: genesisValidatorsRoot, - }, - }) - } + + require.NoError(t, cs.SetClock(startup.NewClock(genesisTime, gvr))) + time.Sleep(4 * time.Second) assert.Equal(t, 5, len(s.host.Network().Peers()), "Not all peers added to peerstore") require.NoError(t, s.Stop()) @@ -327,11 +303,12 @@ func TestService_JoinLeaveTopic(t *testing.T) { params.SetupTestConfigCleanup(t) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() - s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}}) + gs := startup.NewClockSynchronizer() + s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs}) require.NoError(t, err) go s.awaitStateInitialized() - fd := initializeStateWithForkDigest(ctx, t, s.stateNotifier.StateFeed()) + fd := initializeStateWithForkDigest(ctx, t, gs) assert.Equal(t, 0, len(s.joinedTopics)) @@ -358,23 +335,12 @@ func TestService_JoinLeaveTopic(t *testing.T) { // initializeStateWithForkDigest sets up the state feed initialized event and returns the fork // digest associated with that genesis event. 
-func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.Feed) [4]byte { +func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte { gt := prysmTime.Now() - gvr := bytesutil.PadTo([]byte("genesis validators root"), 32) - for n := 0; n == 0; { - if ctx.Err() != nil { - t.Fatal(ctx.Err()) - } - n = ef.Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: gt, - GenesisValidatorsRoot: gvr, - }, - }) - } + gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32)) + require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr))) - fd, err := forks.CreateForkDigest(gt, gvr) + fd, err := forks.CreateForkDigest(gt, gvr[:]) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize. diff --git a/beacon-chain/p2p/subnets_test.go b/beacon-chain/p2p/subnets_test.go index 8d0f594e18..b7d2362698 100644 --- a/beacon-chain/p2p/subnets_test.go +++ b/beacon-chain/p2p/subnets_test.go @@ -12,10 +12,8 @@ import ( "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p/core/crypto" "github.com/prysmaticlabs/go-bitfield" - mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper" @@ -88,15 +86,17 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) { // Make one service on port 4001. 
port = 4001 + gs := startup.NewClockSynchronizer() cfg := &Config{ BootstrapNodeAddr: []string{bootNode.String()}, Discv5BootStrapAddr: []string{bootNode.String()}, MaxPeers: 30, UDPPort: uint(port), + ClockWaiter: gs, } - cfg.StateNotifier = &mock.MockStateNotifier{} s, err = NewService(context.Background(), cfg) require.NoError(t, err) + exitRoutine := make(chan bool) go func() { s.Start() @@ -104,15 +104,8 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) { }() time.Sleep(50 * time.Millisecond) // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). - for sent := 0; sent == 0; { - sent = s.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) - } + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr))) // Wait for the nodes to have their local routing tables to be populated with the other nodes time.Sleep(6 * discoveryWaitTime) diff --git a/beacon-chain/rpc/BUILD.bazel b/beacon-chain/rpc/BUILD.bazel index ae5473e360..e0d275ae2f 100644 --- a/beacon-chain/rpc/BUILD.bazel +++ b/beacon-chain/rpc/BUILD.bazel @@ -36,6 +36,7 @@ go_library( "//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library", "//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library", "//beacon-chain/slasher:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync:go_default_library", "//config/features:go_default_library", diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel b/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel index 2038803ccb..1fcce9eb48 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel @@ -55,6 +55,7 @@ go_library( 
"//beacon-chain/operations/synccommittee:go_default_library", "//beacon-chain/operations/voluntaryexits:go_default_library", "//beacon-chain/p2p:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync:go_default_library", diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/server.go b/beacon-chain/rpc/prysm/v1alpha1/validator/server.go index 93bf2d274f..3cc14207e4 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/server.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/server.go @@ -7,12 +7,10 @@ import ( "context" "time" - "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block" opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" @@ -25,6 +23,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -74,6 +73,7 @@ type Server struct { ExecutionEngineCaller execution.EngineCaller BlockBuilder builder.BlockBuilder BLSChangesPool blstoexec.PoolManager + ClockWaiter startup.ClockWaiter } // WaitForActivation checks if a validator public key exists in the active validator registry of the current @@ -170,30 +170,17 @@ 
func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal return stream.Send(res) } - stateChannel := make(chan *feed.Event, 1) - stateSub := vs.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - for { - select { - case event := <-stateChannel: - if event.Type == statefeed.Initialized { - data, ok := event.Data.(*statefeed.InitializedData) - if !ok { - return errors.New("event data is not type *statefeed.InitializedData") - } - log.WithField("starttime", data.StartTime).Debug("Received chain started event") - log.Debug("Sending genesis time notification to connected validator clients") - res := ðpb.ChainStartResponse{ - Started: true, - GenesisTime: uint64(data.StartTime.Unix()), - GenesisValidatorsRoot: data.GenesisValidatorsRoot, - } - return stream.Send(res) - } - case <-stateSub.Err(): - return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine") - case <-vs.Ctx.Done(): - return status.Error(codes.Canceled, "Context canceled") - } + clock, err := vs.ClockWaiter.WaitForClock(vs.Ctx) + if err != nil { + return status.Error(codes.Canceled, "Context canceled") } + log.WithField("starttime", clock.GenesisTime()).Debug("Received chain started event") + log.Debug("Sending genesis time notification to connected validator clients") + gvr := clock.GenesisValidatorsRoot() + res := ðpb.ChainStartResponse{ + Started: true, + GenesisTime: uint64(clock.GenesisTime().Unix()), + GenesisValidatorsRoot: gvr[:], + } + return stream.Send(res) } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go index 1c3f802a27..39161d367b 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go @@ -10,9 +10,8 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/event" mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/crypto/bls" @@ -189,13 +188,14 @@ func TestWaitForActivation_MultipleStatuses(t *testing.T) { func TestWaitForChainStart_ContextClosed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) chainService := &mockChain.ChainService{} - Server := &Server{ + server := &Server{ Ctx: ctx, ChainStartFetcher: &mockExecution.FaultyExecutionChain{ ChainFeed: new(event.Feed), }, StateNotifier: chainService.StateNotifier(), HeadFetcher: chainService, + ClockWaiter: startup.NewClockSynchronizer(), } exitRoutine := make(chan bool) @@ -204,7 +204,7 @@ func TestWaitForChainStart_ContextClosed(t *testing.T) { mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl) mockStream.EXPECT().Context().Return(ctx) go func(tt *testing.T) { - err := Server.WaitForChainStart(&emptypb.Empty{}, mockStream) + err := server.WaitForChainStart(&emptypb.Empty{}, mockStream) assert.ErrorContains(tt, "Context canceled", err) <-exitRoutine }(t) @@ -243,11 +243,9 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) { } func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) { - genesisValidatorsRoot := params.BeaconConfig().ZeroHash - // Set head state to nil chainService := &mockChain.ChainService{State: nil} - notifier := chainService.StateNotifier() + gs := startup.NewClockSynchronizer() Server := &Server{ Ctx: context.Background(), ChainStartFetcher: &mockExecution.Chain{ @@ -255,6 +253,7 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t 
*testing.T) { }, StateNotifier: chainService.StateNotifier(), HeadFetcher: chainService, + ClockWaiter: gs, } ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -267,15 +266,7 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) { assert.NoError(t, Server.WaitForChainStart(&emptypb.Empty{}, mockStream), "Could not call RPC method") wg.Done() }() - // Simulate a late state initialization event, so that - // method is able to handle race condition here. - notifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Unix(0, 0), - GenesisValidatorsRoot: genesisValidatorsRoot[:], - }, - }) + util.WaitTimeout(wg, time.Second) } @@ -284,6 +275,8 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) { genesisValidatorsRoot := bytesutil.ToBytes32([]byte("validators")) chainService := &mockChain.ChainService{} + gs := startup.NewClockSynchronizer() + Server := &Server{ Ctx: context.Background(), ChainStartFetcher: &mockExecution.FaultyExecutionChain{ @@ -291,6 +284,7 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) { }, StateNotifier: chainService.StateNotifier(), HeadFetcher: chainService, + ClockWaiter: gs, } exitRoutine := make(chan bool) ctrl := gomock.NewController(t) @@ -310,15 +304,7 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) { }(t) // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
- for sent := 0; sent == 0; { - sent = Server.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Unix(0, 0), - GenesisValidatorsRoot: genesisValidatorsRoot[:], - }, - }) - } + require.NoError(t, gs.SetClock(startup.NewClock(time.Unix(0, 0), genesisValidatorsRoot))) exitRoutine <- true require.LogsContain(t, hook, "Sending genesis time") diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go index e91532c106..cb41a77641 100644 --- a/beacon-chain/rpc/service.go +++ b/beacon-chain/rpc/service.go @@ -41,6 +41,7 @@ import ( nodev1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/node" validatorv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator" slasherservice "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" chainSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" "github.com/prysmaticlabs/prysm/v4/config/features" @@ -120,6 +121,7 @@ type Config struct { OptimisticModeFetcher blockchain.OptimisticModeFetcher BlockBuilder builder.BlockBuilder Router *mux.Router + ClockWaiter startup.ClockWaiter } // NewService instantiates a new RPC service instance that will @@ -246,6 +248,7 @@ func (s *Service) Start() { ProposerSlotIndexCache: s.cfg.ProposerIdsCache, BlockBuilder: s.cfg.BlockBuilder, BLSChangesPool: s.cfg.BLSChangesPool, + ClockWaiter: s.cfg.ClockWaiter, } validatorServerV1 := &validator.Server{ HeadFetcher: s.cfg.HeadFetcher, diff --git a/beacon-chain/slasher/BUILD.bazel b/beacon-chain/slasher/BUILD.bazel index 900331f177..9ddf024495 100644 --- a/beacon-chain/slasher/BUILD.bazel +++ b/beacon-chain/slasher/BUILD.bazel @@ -26,11 +26,11 @@ go_library( "//async/event:go_default_library", "//beacon-chain/blockchain:go_default_library", "//beacon-chain/core/blocks:go_default_library", - 
"//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/operations/slashings:go_default_library", "//beacon-chain/slasher/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync:go_default_library", @@ -70,14 +70,13 @@ go_test( deps = [ "//async/event:go_default_library", "//beacon-chain/blockchain/testing:go_default_library", - "//beacon-chain/core/feed:go_default_library", - "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/signing:go_default_library", "//beacon-chain/db/testing:go_default_library", "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library", "//beacon-chain/operations/slashings/mock:go_default_library", "//beacon-chain/slasher/mock:go_default_library", "//beacon-chain/slasher/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync/initial-sync/testing:go_default_library", "//config/fieldparams:go_default_library", diff --git a/beacon-chain/slasher/detect_attestations_test.go b/beacon-chain/slasher/detect_attestations_test.go index 3c7b165f44..303710dd5f 100644 --- a/beacon-chain/slasher/detect_attestations_test.go +++ b/beacon-chain/slasher/detect_attestations_test.go @@ -11,6 +11,7 @@ import ( dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock" slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/crypto/bls" @@ -239,6 +240,7 @@ func Test_processQueuedAttestations(t 
*testing.T) { HeadStateFetcher: mockChain, AttestationStateFetcher: mockChain, SlashingPoolInserter: &slashingsmock.PoolMock{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) s.genesisTime = genesisTime @@ -296,6 +298,7 @@ func Test_processQueuedAttestations_MultipleChunkIndices(t *testing.T) { HeadStateFetcher: mockChain, AttestationStateFetcher: mockChain, SlashingPoolInserter: &slashingsmock.PoolMock{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) s.genesisTime = genesisTime @@ -361,6 +364,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) { HeadStateFetcher: mockChain, AttestationStateFetcher: mockChain, SlashingPoolInserter: &slashingsmock.PoolMock{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) s.genesisTime = genesisTime @@ -475,6 +479,7 @@ func Test_applyAttestationForValidator_MinSpanChunk(t *testing.T) { &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) @@ -535,6 +540,7 @@ func Test_applyAttestationForValidator_MaxSpanChunk(t *testing.T) { &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) @@ -602,6 +608,7 @@ func Test_checkDoubleVotes_SlashableInputAttestations(t *testing.T) { &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) @@ -638,6 +645,7 @@ func Test_checkDoubleVotes_SlashableAttestationsOnDisk(t *testing.T) { &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) @@ -683,6 +691,7 @@ func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) { &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: 
startup.NewClockSynchronizer(), }) require.NoError(t, err) @@ -769,6 +778,7 @@ func TestService_processQueuedAttestations(t *testing.T) { Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, HeadStateFetcher: mockChain, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(t, err) @@ -805,6 +815,7 @@ func BenchmarkCheckSlashableAttestations(b *testing.B) { Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, HeadStateFetcher: mockChain, + ClockWaiter: startup.NewClockSynchronizer(), }) require.NoError(b, err) diff --git a/beacon-chain/slasher/detect_blocks_test.go b/beacon-chain/slasher/detect_blocks_test.go index 3fdb52b4e9..5434588c9c 100644 --- a/beacon-chain/slasher/detect_blocks_test.go +++ b/beacon-chain/slasher/detect_blocks_test.go @@ -10,6 +10,7 @@ import ( doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock" slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -63,6 +64,7 @@ func Test_processQueuedBlocks_DetectsDoubleProposals(t *testing.T) { HeadStateFetcher: mockChain, StateGen: stategen.New(beaconDB, doublylinkedtree.New()), SlashingPoolInserter: &slashingsmock.PoolMock{}, + ClockWaiter: startup.NewClockSynchronizer(), }, params: DefaultParams(), blksQueue: newBlocksQueue(), @@ -129,6 +131,7 @@ func Test_processQueuedBlocks_NotSlashable(t *testing.T) { Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, HeadStateFetcher: mockChain, + ClockWaiter: startup.NewClockSynchronizer(), }, params: DefaultParams(), blksQueue: newBlocksQueue(), diff --git a/beacon-chain/slasher/receive_test.go b/beacon-chain/slasher/receive_test.go 
index e7c498d998..8396ff5654 100644 --- a/beacon-chain/slasher/receive_test.go +++ b/beacon-chain/slasher/receive_test.go @@ -8,6 +8,7 @@ import ( mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" params2 "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" @@ -24,6 +25,7 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) { serviceCfg: &ServiceConfig{ IndexedAttestationsFeed: new(event.Feed), StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }, attsQueue: newAttestationsQueue(), } @@ -207,6 +209,7 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) { serviceCfg: &ServiceConfig{ IndexedAttestationsFeed: new(event.Feed), StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }, attsQueue: newAttestationsQueue(), } @@ -245,6 +248,7 @@ func TestSlasher_receiveBlocks_OK(t *testing.T) { serviceCfg: &ServiceConfig{ BeaconBlockHeadersFeed: new(event.Feed), StateNotifier: &mock.MockStateNotifier{}, + ClockWaiter: startup.NewClockSynchronizer(), }, blksQueue: newBlocksQueue(), } @@ -288,6 +292,7 @@ func TestService_processQueuedBlocks(t *testing.T) { Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, HeadStateFetcher: mockChain, + ClockWaiter: startup.NewClockSynchronizer(), }, blksQueue: newBlocksQueue(), } diff --git a/beacon-chain/slasher/service.go b/beacon-chain/slasher/service.go index 9c27b63068..b61592fba1 100644 --- a/beacon-chain/slasher/service.go +++ b/beacon-chain/slasher/service.go @@ -10,10 +10,10 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/event" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -39,6 +39,7 @@ type ServiceConfig struct { SlashingPoolInserter slashings.PoolInserter HeadStateFetcher blockchain.HeadFetcher SyncChecker sync.Checker + ClockWaiter startup.ClockWaiter } // SlashingChecker is an interface for defining services that the beacon node may interact with to provide slashing data. @@ -167,43 +168,19 @@ func (s *Service) Stop() error { } // Status of the slasher service. -func (_ *Service) Status() error { +func (*Service) Status() error { return nil } func (s *Service) waitForChainInitialization() { - stateChannel := make(chan *feed.Event, 1) - stateSub := s.serviceCfg.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - for { - select { - case stateEvent := <-stateChannel: - // Wait for us to receive the genesis time via a chain started notification. - if stateEvent.Type == statefeed.Initialized { - // Alternatively, if the chain has already started, we then read the genesis - // time value from this data. 
- data, ok := stateEvent.Data.(*statefeed.InitializedData) - if !ok { - log.Error( - "Could not receive chain start notification, want *statefeed.ChainStartedData", - ) - return - } - s.genesisTime = data.StartTime - log.WithField("genesisTime", s.genesisTime).Info( - "Slasher received chain initialization event", - ) - return - } - case err := <-stateSub.Err(): - log.WithError(err).Error( - "Slasher could not subscribe to state events", - ) - return - case <-s.ctx.Done(): - return - } + clock, err := s.serviceCfg.ClockWaiter.WaitForClock(s.ctx) + if err != nil { + log.WithError(err).Error("Could not receive chain start notification") } + s.genesisTime = clock.GenesisTime() + log.WithField("genesisTime", s.genesisTime).Info( + "Slasher received chain initialization event", + ) } func (s *Service) waitForSync(genesisTime time.Time) { diff --git a/beacon-chain/slasher/service_test.go b/beacon-chain/slasher/service_test.go index 5ceb0ec29c..272d7d0a4a 100644 --- a/beacon-chain/slasher/service_test.go +++ b/beacon-chain/slasher/service_test.go @@ -8,10 +8,9 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/event" mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" mockslasher "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/mock" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/testing/require" @@ -42,6 +41,7 @@ func TestService_StartStop_ChainInitialized(t *testing.T) { State: beaconState, Slot: ¤tSlot, } + gs := startup.NewClockSynchronizer() srv, err := New(context.Background(), &ServiceConfig{ IndexedAttestationsFeed: new(event.Feed), 
BeaconBlockHeadersFeed: new(event.Feed), @@ -49,14 +49,13 @@ func TestService_StartStop_ChainInitialized(t *testing.T) { Database: slasherDB, HeadStateFetcher: mockChain, SyncChecker: &mockSync.Sync{IsSyncing: false}, + ClockWaiter: gs, }) require.NoError(t, err) go srv.Start() time.Sleep(time.Millisecond * 100) - srv.serviceCfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{StartTime: time.Now()}, - }) + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr))) time.Sleep(time.Millisecond * 100) srv.attsSlotTicker = &slots.SlotTicker{} srv.blocksSlotTicker = &slots.SlotTicker{} diff --git a/beacon-chain/startup/BUILD.bazel b/beacon-chain/startup/BUILD.bazel new file mode 100644 index 0000000000..1e286dfde2 --- /dev/null +++ b/beacon-chain/startup/BUILD.bazel @@ -0,0 +1,31 @@ +load("@prysm//tools/go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "clock.go", + "synchronizer.go", + ], + importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup", + visibility = ["//visibility:public"], + deps = [ + "//consensus-types/primitives:go_default_library", + "//time/slots:go_default_library", + "@com_github_pkg_errors//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "clock_test.go", + "synchronizer_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//config/params:go_default_library", + "//consensus-types/primitives:go_default_library", + "//encoding/bytesutil:go_default_library", + "//testing/require:go_default_library", + ], +) diff --git a/beacon-chain/startup/clock.go b/beacon-chain/startup/clock.go new file mode 100644 index 0000000000..a35b3ff99b --- /dev/null +++ b/beacon-chain/startup/clock.go @@ -0,0 +1,75 @@ +package startup + +import ( + "time" + + types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/time/slots" +) + +// 
Nower is a function that can return the current time. +// In Clock, Now() will use time.Now by default, but a Nower can be set using WithNower in NewClock +// to customize the return value for Now() in tests. +type Nower func() time.Time + +// Clock abstracts important time-related concerns in the beacon chain: +// - provides a time.Now() construct that can be overridden in tests +// - GenesisTime() to know the genesis time or use genesis time determination as a synchronization point. +// - CurrentSlot: convenience conversion for current time -> slot +// (support backwards compatibility with the TimeFetcher interface) +// - GenesisValidatorsRoot: is determined at the same point as genesis time and is needed by some of the same code, +// so it is also bundled for convenience. +type Clock struct { + t time.Time + vr [32]byte + now Nower +} + +// GenesisTime returns the genesis timestamp. +func (g *Clock) GenesisTime() time.Time { + return g.t +} + +// GenesisValidatorsRoot returns the genesis state validator root +func (g *Clock) GenesisValidatorsRoot() [32]byte { + return g.vr +} + +// CurrentSlot returns the current slot relative to the time.Time value that Clock embeds. +func (g *Clock) CurrentSlot() types.Slot { + now := g.now() + return slots.Duration(g.t, now) +} + +// Now provides a value for time.Now() that can be overridden in tests. +func (g *Clock) Now() time.Time { + return g.now() +} + +// ClockOpt is a functional option to change the behavior of a clock value made by NewClock. +// It is primarily intended as a way to inject an alternate time.Now() callback (WithNower) for testing. +type ClockOpt func(*Clock) + +// WithNower allows tests in particular to inject an alternate implementation of time.Now (vs using system time) +func WithNower(n Nower) ClockOpt { + return func(g *Clock) { + g.now = n + } +} + +// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr). 
+// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation, +// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot. +func NewClock(t time.Time, vr [32]byte, opts ...ClockOpt) *Clock { + c := &Clock{ + t: t, + vr: vr, + } + for _, o := range opts { + o(c) + } + if c.now == nil { + c.now = time.Now + } + return c +} diff --git a/beacon-chain/startup/clock_test.go b/beacon-chain/startup/clock_test.go new file mode 100644 index 0000000000..811f75d819 --- /dev/null +++ b/beacon-chain/startup/clock_test.go @@ -0,0 +1,49 @@ +package startup + +import ( + "testing" + "time" + + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/testing/require" +) + +func TestClock(t *testing.T) { + vr := [32]byte{} + cases := []struct { + name string + nSlots primitives.Slot + }{ + { + name: "3 slots", + nSlots: 3, + }, + { + name: "0 slots", + nSlots: 0, + }, + { + name: "1 epoch", + nSlots: params.BeaconConfig().SlotsPerEpoch, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + genesis, now := testInterval(c.nSlots) + nower := func() time.Time { return now } + cl := NewClock(genesis, vr, WithNower(nower)) + require.Equal(t, genesis, cl.GenesisTime()) + require.Equal(t, now, cl.Now()) + require.Equal(t, c.nSlots, cl.CurrentSlot()) + }) + } +} + +func testInterval(nSlots primitives.Slot) (time.Time, time.Time) { + oneSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) + var start uint64 = 23 + endOffset := oneSlot * time.Duration(nSlots) + startTime := time.Unix(int64(start), 0) + return startTime, startTime.Add(endOffset) +} diff --git a/beacon-chain/startup/synchronizer.go b/beacon-chain/startup/synchronizer.go new file mode 100644 index 0000000000..38c4e2c2ca --- /dev/null +++ b/beacon-chain/startup/synchronizer.go @@ -0,0 +1,60 @@ 
+package startup + +import ( + "context" + + "github.com/pkg/errors" +) + +var errClockSet = errors.New("refusing to change clock after it is set") + +// ClockSynchronizer provides a synchronization mechanism for services that rely on the genesis time and validator root +// being known before getting to work. +type ClockSynchronizer struct { + ready chan struct{} + c *Clock +} + +// ClockWaiter specifies the WaitForClock method. ClockSynchronizer works in a 1:N pattern, with 1 thread calling +// SetClock, and the others blocking on a call to WaitForClock until the expected *Clock value is set. +type ClockWaiter interface { + WaitForClock(context.Context) (*Clock, error) +} + +// ClockSetter specifies the SetClock method. ClockSynchronizer works in a 1:N pattern, so in a given graph of services, +// only one service should be given the ClockSetter, and all others relying on the service's activation should use +// ClockWaiter. +type ClockSetter interface { + SetClock(c *Clock) error +} + +// SetClock sets the Clock value `c` and unblocks all threads waiting for `c` via WaitForClock. +// Calling SetClock more than once will return an error, as calling this function is meant to be a signal +// that the system is ready to start. +func (w *ClockSynchronizer) SetClock(c *Clock) error { + if w.c != nil { + return errors.Wrapf(errClockSet, "when SetClock called, Clock already set to time=%d", w.c.GenesisTime().Unix()) + } + w.c = c + close(w.ready) + return nil +} + +// WaitForClock will block the caller until the *Clock value is available. If the provided context is canceled (eg via +// a deadline set upstream), the function will return the error given by ctx.Err(). 
+func (w *ClockSynchronizer) WaitForClock(ctx context.Context) (*Clock, error) { + select { + case <-w.ready: + return w.c, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// NewClockSynchronizer initializes a single instance of ClockSynchronizer that must be used by all ClockWaiters that +// need to be synchronized to a ClockSetter (ie blockchain service). +func NewClockSynchronizer() *ClockSynchronizer { + return &ClockSynchronizer{ + ready: make(chan struct{}), + } +} diff --git a/beacon-chain/startup/synchronizer_test.go b/beacon-chain/startup/synchronizer_test.go new file mode 100644 index 0000000000..b52423e8fe --- /dev/null +++ b/beacon-chain/startup/synchronizer_test.go @@ -0,0 +1,51 @@ +package startup + +import ( + "context" + "testing" + "time" + + "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/v4/testing/require" +) + +func TestSynchronizerErrOnSecondSet(t *testing.T) { + s := NewClockSynchronizer() + require.NoError(t, s.SetClock(NewClock(time.Now(), [32]byte{}))) + require.ErrorIs(t, s.SetClock(NewClock(time.Now(), [32]byte{})), errClockSet) +} + +func TestWaitForClockCanceled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + s := NewClockSynchronizer() + c, err := s.WaitForClock(ctx) + require.Equal(t, true, c == nil) + require.ErrorIs(t, err, context.Canceled) +} + +func TestWaitForClock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := NewClockSynchronizer() + var vr [32]byte + copy(vr[:], bytesutil.PadTo([]byte("valroot"), 32)) + genesis := time.Unix(23, 0) + later := time.Unix(42, 0) + nower := func() time.Time { return later } + expect := NewClock(genesis, vr, WithNower(nower)) + go func() { + // This is just to ensure the test doesn't hang. + // If we hit this cancellation case, then the happy path failed and the NoError assertion etc below will fail. 
+ time.Sleep(time.Second) + cancel() + }() + go func() { + require.NoError(t, s.SetClock(expect)) + }() + c, err := s.WaitForClock(ctx) + require.NoError(t, err) + require.Equal(t, later, c.Now()) + require.Equal(t, genesis, c.GenesisTime()) + require.Equal(t, vr, c.GenesisValidatorsRoot()) +} diff --git a/beacon-chain/sync/BUILD.bazel b/beacon-chain/sync/BUILD.bazel index 80f47e2e54..734acce1d1 100644 --- a/beacon-chain/sync/BUILD.bazel +++ b/beacon-chain/sync/BUILD.bazel @@ -65,7 +65,6 @@ go_library( "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/block:go_default_library", "//beacon-chain/core/feed/operation:go_default_library", - "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/signing:go_default_library", "//beacon-chain/core/transition:go_default_library", @@ -82,6 +81,7 @@ go_library( "//beacon-chain/p2p/encoder:go_default_library", "//beacon-chain/p2p/peers:go_default_library", "//beacon-chain/p2p/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//cache/lru:go_default_library", @@ -179,7 +179,6 @@ go_test( "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/operation:go_default_library", - "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/signing:go_default_library", "//beacon-chain/core/time:go_default_library", @@ -198,6 +197,7 @@ go_test( "//beacon-chain/p2p/peers:go_default_library", "//beacon-chain/p2p/testing:go_default_library", "//beacon-chain/p2p/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/state-native:go_default_library", "//beacon-chain/state/stategen:go_default_library", diff --git 
a/beacon-chain/sync/broadcast_bls_changes_test.go b/beacon-chain/sync/broadcast_bls_changes_test.go index 27a6de5b87..8684a5d991 100644 --- a/beacon-chain/sync/broadcast_bls_changes_test.go +++ b/beacon-chain/sync/broadcast_bls_changes_test.go @@ -37,7 +37,6 @@ func TestBroadcastBLSChanges(t *testing.T) { WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), ) @@ -71,7 +70,6 @@ func TestRateBLSChanges(t *testing.T) { WithP2P(p1), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), ) @@ -141,7 +139,6 @@ func TestBroadcastBLSBatch_changes_slice(t *testing.T) { WithP2P(p1), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), ) diff --git a/beacon-chain/sync/context.go b/beacon-chain/sync/context.go index da0092fc36..875af143bc 100644 --- a/beacon-chain/sync/context.go +++ b/beacon-chain/sync/context.go @@ -4,8 +4,6 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/protocol" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" ) @@ -13,33 +11,29 @@ import ( const forkDigestLength = 4 // writes peer's current context for the expected payload to the stream. 
-func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain.ForkFetcher) error { +func writeContextToStream(objCtx []byte, stream network.Stream) error { // The rpc context for our v2 methods is the fork-digest of // the relevant payload. We write the associated fork-digest(context) // into the stream for the payload. - rpcCtx, err := rpcContext(stream, chain) + rpcCtx, err := expectRpcContext(stream) if err != nil { return err } - // Exit early if there is an empty context. - if len(rpcCtx) == 0 { + // Exit early if an empty context is expected. + if !rpcCtx { return nil } - // Always choose the object's context when writing to the stream. - if objCtx != nil { - rpcCtx = objCtx - } - _, err = stream.Write(rpcCtx) + _, err = stream.Write(objCtx) return err } // reads any attached context-bytes to the payload. -func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) { - rpcCtx, err := rpcContext(stream, chain) +func readContextFromStream(stream network.Stream) ([]byte, error) { + hasCtx, err := expectRpcContext(stream) if err != nil { return nil, err } - if len(rpcCtx) == 0 { + if !hasCtx { return []byte{}, nil } // Read context (fork-digest) from stream @@ -50,26 +44,18 @@ func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher) return b, nil } -// retrieve expected context depending on rpc topic schema version. -func rpcContext(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) { +func expectRpcContext(stream network.Stream) (bool, error) { _, _, version, err := p2p.TopicDeconstructor(string(stream.Protocol())) if err != nil { - return nil, err + return false, err } switch version { case p2p.SchemaVersionV1: - // Return empty context for a v1 method. 
- return []byte{}, nil + return false, nil case p2p.SchemaVersionV2: - currFork := chain.CurrentFork() - genRoot := chain.GenesisValidatorsRoot() - digest, err := signing.ComputeForkDigest(currFork.CurrentVersion, genRoot[:]) - if err != nil { - return nil, err - } - return digest[:], nil + return true, nil default: - return nil, errors.New("invalid version of %s registered for topic: %s") + return false, errors.New("invalid version of %s registered for topic: %s") } } diff --git a/beacon-chain/sync/context_test.go b/beacon-chain/sync/context_test.go index 883f5e223b..89b99d3d0c 100644 --- a/beacon-chain/sync/context_test.go +++ b/beacon-chain/sync/context_test.go @@ -31,7 +31,7 @@ func TestContextWrite_NoWrites(t *testing.T) { assert.NoError(t, err) // Nothing will be written to the stream - assert.NoError(t, writeContextToStream(nil, strm, nil)) + assert.NoError(t, writeContextToStream([]byte{}, strm)) if util.WaitTimeout(wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") } @@ -48,7 +48,7 @@ func TestContextRead_NoReads(t *testing.T) { wantedData := []byte{'A', 'B', 'C', 'D'} nPeer.BHost.SetStreamHandler(core.ProtocolID(prID), func(stream network.Stream) { // No Context will be read from it - dt, err := readContextFromStream(stream, nil) + dt, err := readContextFromStream(stream) assert.NoError(t, err) assert.Equal(t, 0, len(dt)) diff --git a/beacon-chain/sync/decode_pubsub.go b/beacon-chain/sync/decode_pubsub.go index 45b0b57920..1f2bb32bcc 100644 --- a/beacon-chain/sync/decode_pubsub.go +++ b/beacon-chain/sync/decode_pubsub.go @@ -49,7 +49,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err } // Handle different message types across forks. 
if topic == p2p.BlockSubnetTopicFormat { - m, err = extractBlockDataType(fDigest[:], s.cfg.chain) + m, err = extractBlockDataType(fDigest[:], s.cfg.clock) if err != nil { return nil, err } diff --git a/beacon-chain/sync/decode_pubsub_test.go b/beacon-chain/sync/decode_pubsub_test.go index feaf3b2187..3f64198f2c 100644 --- a/beacon-chain/sync/decode_pubsub_test.go +++ b/beacon-chain/sync/decode_pubsub_test.go @@ -15,6 +15,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" @@ -81,8 +82,9 @@ func TestService_decodePubsubMessage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()} s := &Service{ - cfg: &config{p2p: p2ptesting.NewTestP2P(t), chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}}, + cfg: &config{p2p: p2ptesting.NewTestP2P(t), chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)}, } if tt.topic != "" { if tt.input == nil { diff --git a/beacon-chain/sync/error.go b/beacon-chain/sync/error.go index 769ad05e94..650afdeccb 100644 --- a/beacon-chain/sync/error.go +++ b/beacon-chain/sync/error.go @@ -13,6 +13,9 @@ import ( "github.com/sirupsen/logrus" ) +var ErrNoValidDigest = errors.New("no valid digest matched") +var ErrUnrecognizedVersion = errors.New("cannot determine context bytes for unrecognized object") + var responseCodeSuccess = byte(0x00) var responseCodeInvalidRequest = byte(0x01) var responseCodeServerError = byte(0x02) diff --git a/beacon-chain/sync/fork_watcher.go b/beacon-chain/sync/fork_watcher.go index 
3b89d774aa..756954fcfa 100644 --- a/beacon-chain/sync/fork_watcher.go +++ b/beacon-chain/sync/fork_watcher.go @@ -12,7 +12,7 @@ import ( // Is a background routine that observes for new incoming forks. Depending on the epoch // it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries. func (s *Service) forkWatcher() { - slotTicker := slots.NewSlotTicker(s.cfg.chain.GenesisTime(), params.BeaconConfig().SecondsPerSlot) + slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot) for { select { // In the event of a node restart, we will still end up subscribing to the correct @@ -42,8 +42,8 @@ func (s *Service) forkWatcher() { // Checks if there is a fork in the next epoch and if there is // it registers the appropriate gossip and rpc topics. func (s *Service) registerForUpcomingFork(currEpoch primitives.Epoch) error { - genRoot := s.cfg.chain.GenesisValidatorsRoot() - isNextForkEpoch, err := forks.IsForkNextEpoch(s.cfg.chain.GenesisTime(), genRoot[:]) + genRoot := s.cfg.clock.GenesisValidatorsRoot() + isNextForkEpoch, err := forks.IsForkNextEpoch(s.cfg.clock.GenesisTime(), genRoot[:]) if err != nil { return errors.Wrap(err, "Could not retrieve next fork epoch") } @@ -70,7 +70,7 @@ func (s *Service) registerForUpcomingFork(currEpoch primitives.Epoch) error { // Checks if there was a fork in the previous epoch, and if there // was then we deregister the topics from that particular fork. func (s *Service) deregisterFromPastFork(currEpoch primitives.Epoch) error { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() // This method takes care of the de-registration of // old gossip pubsub handlers. Once we are at the epoch // after the fork, we de-register from all the outdated topics. 
diff --git a/beacon-chain/sync/fork_watcher_test.go b/beacon-chain/sync/fork_watcher_test.go index ec6d7ff8ef..f0441ac9e9 100644 --- a/beacon-chain/sync/fork_watcher_test.go +++ b/beacon-chain/sync/fork_watcher_test.go @@ -9,6 +9,7 @@ import ( mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -29,19 +30,21 @@ func TestService_CheckForNextEpochFork(t *testing.T) { name: "no fork in the next epoch", svcCreator: func(t *testing.T) *Service { peer2peer := p2ptest.NewTestP2P(t) + gt := time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second) + vr := [32]byte{'A'} chainService := &mockChain.ChainService{ - Genesis: time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second), - ValidatorsRoot: [32]byte{'A'}, + Genesis: gt, + ValidatorsRoot: vr, } ctx, cancel := context.WithCancel(context.Background()) r := &Service{ ctx: ctx, cancel: cancel, cfg: &config{ - p2p: peer2peer, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: peer2peer, + chain: chainService, + clock: startup.NewClock(gt, vr), + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -58,9 +61,11 @@ func TestService_CheckForNextEpochFork(t *testing.T) { name: "altair fork in the next epoch", svcCreator: func(t *testing.T) *Service { peer2peer := p2ptest.NewTestP2P(t) + gt := time.Now().Add(-4 * oneEpoch()) + vr := [32]byte{'A'} 
chainService := &mockChain.ChainService{ - Genesis: time.Now().Add(-4 * oneEpoch()), - ValidatorsRoot: [32]byte{'A'}, + Genesis: gt, + ValidatorsRoot: vr, } bCfg := params.BeaconConfig().Copy() bCfg.AltairForkEpoch = 5 @@ -71,10 +76,10 @@ func TestService_CheckForNextEpochFork(t *testing.T) { ctx: ctx, cancel: cancel, cfg: &config{ - p2p: peer2peer, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: peer2peer, + chain: chainService, + clock: startup.NewClock(gt, vr), + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -84,7 +89,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { currEpoch: 4, wantErr: false, postSvcCheck: func(t *testing.T, s *Service) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(5, genRoot[:]) assert.NoError(t, err) assert.Equal(t, true, s.subHandler.digestExists(digest)) @@ -115,10 +120,10 @@ func TestService_CheckForNextEpochFork(t *testing.T) { ctx: ctx, cancel: cancel, cfg: &config{ - p2p: peer2peer, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: peer2peer, + chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -128,7 +133,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { currEpoch: 4, wantErr: false, postSvcCheck: func(t *testing.T, s *Service) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(5, genRoot[:]) assert.NoError(t, err) assert.Equal(t, true, s.subHandler.digestExists(digest)) @@ -167,15 +172,16 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { 
Genesis: time.Now().Add(-oneEpoch()), ValidatorsRoot: [32]byte{'A'}, } + clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot) ctx, cancel := context.WithCancel(context.Background()) r := &Service{ ctx: ctx, cancel: cancel, cfg: &config{ - p2p: peer2peer, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: peer2peer, + chain: chainService, + clock: clock, + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -207,6 +213,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { Genesis: time.Now().Add(-4 * oneEpoch()), ValidatorsRoot: [32]byte{'A'}, } + clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot) bCfg := params.BeaconConfig().Copy() bCfg.AltairForkEpoch = 3 params.OverrideBeaconConfig(bCfg) @@ -216,10 +223,10 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { ctx: ctx, cancel: cancel, cfg: &config{ - p2p: peer2peer, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: peer2peer, + chain: chainService, + clock: clock, + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -232,7 +239,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { chainService.Genesis = prevGenesis r.registerRPCHandlersAltair() - genRoot := r.cfg.chain.GenesisValidatorsRoot() + genRoot := r.cfg.clock.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(0, genRoot[:]) assert.NoError(t, err) r.registerSubscribers(0, digest) @@ -248,7 +255,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { currEpoch: 4, wantErr: false, postSvcCheck: func(t *testing.T, s *Service) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(0, genRoot[:]) 
assert.NoError(t, err) assert.Equal(t, false, s.subHandler.digestExists(digest)) @@ -281,6 +288,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { Genesis: time.Now().Add(-4 * oneEpoch()), ValidatorsRoot: [32]byte{'A'}, } + clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot) bCfg := params.BeaconConfig().Copy() bCfg.AltairForkEpoch = 1 bCfg.BellatrixForkEpoch = 3 @@ -291,15 +299,15 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { ctx: ctx, cancel: cancel, cfg: &config{ - p2p: peer2peer, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: peer2peer, + chain: chainService, + clock: clock, + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), } - genRoot := r.cfg.chain.GenesisValidatorsRoot() + genRoot := r.cfg.clock.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(1, genRoot[:]) assert.NoError(t, err) r.registerSubscribers(1, digest) @@ -315,7 +323,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { currEpoch: 4, wantErr: false, postSvcCheck: func(t *testing.T, s *Service) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(1, genRoot[:]) assert.NoError(t, err) assert.Equal(t, false, s.subHandler.digestExists(digest)) diff --git a/beacon-chain/sync/initial-sync/BUILD.bazel b/beacon-chain/sync/initial-sync/BUILD.bazel index 36f3d1641c..6527b5ae62 100644 --- a/beacon-chain/sync/initial-sync/BUILD.bazel +++ b/beacon-chain/sync/initial-sync/BUILD.bazel @@ -18,7 +18,6 @@ go_library( deps = [ "//async/abool:go_default_library", "//beacon-chain/blockchain:go_default_library", - "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/block:go_default_library", "//beacon-chain/core/feed/state:go_default_library", 
"//beacon-chain/core/transition:go_default_library", @@ -26,6 +25,7 @@ go_library( "//beacon-chain/p2p:go_default_library", "//beacon-chain/p2p/peers/scorers:go_default_library", "//beacon-chain/p2p/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/sync:go_default_library", "//cmd/beacon-chain/flags:go_default_library", "//config/params:go_default_library", @@ -107,10 +107,7 @@ go_test( embed = [":go_default_library"], deps = [ "//async/abool:go_default_library", - "//async/event:go_default_library", "//beacon-chain/blockchain/testing:go_default_library", - "//beacon-chain/core/feed:go_default_library", - "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/db/testing:go_default_library", "//beacon-chain/p2p:go_default_library", @@ -118,6 +115,7 @@ go_test( "//beacon-chain/p2p/peers/scorers:go_default_library", "//beacon-chain/p2p/testing:go_default_library", "//beacon-chain/p2p/types:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/sync:go_default_library", "//cmd/beacon-chain/flags:go_default_library", "//config/params:go_default_library", diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher.go b/beacon-chain/sync/initial-sync/blocks_fetcher.go index 6b1297c523..93d28411bf 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher.go @@ -11,6 +11,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" prysmsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -86,6 +87,7 @@ type blocksFetcher struct { capacityWeight float64 // how remaining capacity affects peer selection 
mode syncMode // allows to use fetcher in different sync scenarios quit chan struct{} // termination notifier + clock *startup.Clock } // peerLock restricts fetcher actions on per peer basis. Currently, used for rate limiting. diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index b51f8605cc..f77407c932 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -14,6 +14,7 @@ import ( dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2pm "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" beaconsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -548,7 +549,10 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) { defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1}) fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 1*time.Second, false) - fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + gt := time.Now() + vr := [32]byte{} + fetcher.chain = &mock.ChainService{Genesis: gt, ValidatorsRoot: vr} + fetcher.clock = startup.NewClock(gt, vr) hook := logTest.NewGlobal() wg := new(sync.WaitGroup) wg.Add(1) @@ -614,7 +618,10 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) { defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1}) fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 5*time.Second, false) - fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + gt := time.Now() + vr := [32]byte{} + fetcher.chain = &mock.ChainService{Genesis: gt, 
ValidatorsRoot: vr} + fetcher.clock = startup.NewClock(gt, vr) start := time.Now() assert.NoError(t, fetcher.waitForBandwidth(p2.PeerID(), 10)) dur := time.Since(start) @@ -647,10 +654,10 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) { blk := util.NewBeaconBlock() blk.Block.Slot = i - mchain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, mchain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) } assert.NoError(t, stream.Close()) } @@ -671,10 +678,10 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step+1); i += primitives.Slot(req.Step) { blk := util.NewBeaconBlock() blk.Block.Slot = i - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) } assert.NoError(t, stream.Close()) } @@ -695,16 +702,16 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) return func(stream network.Stream) { blk := util.NewBeaconBlock() blk.Block.Slot = 163 - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, 
beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) blk = util.NewBeaconBlock() blk.Block.Slot = 162 wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) assert.NoError(t, stream.Close()) } }, @@ -724,17 +731,17 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) return func(stream network.Stream) { blk := util.NewBeaconBlock() blk.Block.Slot = 160 - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) blk = util.NewBeaconBlock() blk.Block.Slot = 160 wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) assert.NoError(t, stream.Close()) } }, @@ -757,19 +764,19 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) }() for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) { blk := util.NewBeaconBlock() - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) // Patch mid block, with invalid slot number. 
if i == req.StartSlot.Add(req.Count*req.Step/2) { blk.Block.Slot = req.StartSlot - 1 wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) break } blk.Block.Slot = i wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) } } }, @@ -792,19 +799,19 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) }() for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) { blk := util.NewBeaconBlock() - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) // Patch mid block, with invalid slot number. if i == req.StartSlot.Add(req.Count*req.Step/2) { blk.Block.Slot = req.StartSlot.Add(req.Count * req.Step) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) break } blk.Block.Slot = i wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) } } }, @@ -824,16 +831,16 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) return func(stream network.Stream) { blk := util.NewBeaconBlock() blk.Block.Slot = 100 - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - 
assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) blk = util.NewBeaconBlock() blk.Block.Slot = 105 wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) assert.NoError(t, stream.Close()) } }, @@ -852,16 +859,16 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) return func(stream network.Stream) { blk := util.NewBeaconBlock() blk.Block.Slot = 100 - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} + tor := startup.NewClock(time.Now(), [32]byte{}) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) blk = util.NewBeaconBlock() blk.Block.Slot = 103 wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb)) assert.NoError(t, stream.Close()) } }, diff --git a/beacon-chain/sync/initial-sync/initial_sync_test.go b/beacon-chain/sync/initial-sync/initial_sync_test.go index 30dba0262a..85ac72eb73 100644 --- a/beacon-chain/sync/initial-sync/initial_sync_test.go +++ b/beacon-chain/sync/initial-sync/initial_sync_test.go @@ -17,6 +17,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" beaconsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" 
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -210,11 +211,10 @@ func connectPeer(t *testing.T, host *p2pt.TestP2P, datum *peerData, peerStatus * ret = ret[:req.Count] } - mChain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} for i := 0; i < len(ret); i++ { wsb, err := blocks.NewSignedBeaconBlock(ret[i]) require.NoError(t, err) - assert.NoError(t, beaconsync.WriteBlockChunk(stream, mChain, p.Encoding(), wsb)) + assert.NoError(t, beaconsync.WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p.Encoding(), wsb)) } }) @@ -283,10 +283,9 @@ func connectPeerHavingBlocks( if uint64(i) >= uint64(len(blks)) { break } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} wsb, err := blocks.NewSignedBeaconBlock(blks[i]) require.NoError(t, err) - require.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p.Encoding(), wsb)) + require.NoError(t, beaconsync.WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p.Encoding(), wsb)) } }) diff --git a/beacon-chain/sync/initial-sync/service.go b/beacon-chain/sync/initial-sync/service.go index 94f61ba16a..6bf7ea1c52 100644 --- a/beacon-chain/sync/initial-sync/service.go +++ b/beacon-chain/sync/initial-sync/service.go @@ -11,11 +11,11 @@ import ( "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/async/abool" "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/runtime" 
@@ -34,11 +34,13 @@ type blockchainService interface { // Config to set up the initial sync service. type Config struct { - P2P p2p.P2P - DB db.ReadOnlyDatabase - Chain blockchainService - StateNotifier statefeed.Notifier - BlockNotifier blockfeed.Notifier + P2P p2p.P2P + DB db.ReadOnlyDatabase + Chain blockchainService + StateNotifier statefeed.Notifier + BlockNotifier blockfeed.Notifier + ClockWaiter startup.ClockWaiter + InitialSyncComplete chan struct{} } // Service service. @@ -50,6 +52,7 @@ type Service struct { chainStarted *abool.AtomicBool counter *ratecounter.RateCounter genesisChan chan time.Time + clock *startup.Clock } // NewService configures the initial sync service responsible for bringing the node up to the @@ -66,31 +69,34 @@ func NewService(ctx context.Context, cfg *Config) *Service { genesisChan: make(chan time.Time), } - // The reason why we have this goroutine in the constructor is to avoid a race condition - // between services' Start method and the initialization event. - // See https://github.com/prysmaticlabs/prysm/issues/10602 for details. - go s.waitForStateInitialization() - return s } // Start the initial sync service. func (s *Service) Start() { - // Wait for state initialized event. 
- genesis := <-s.genesisChan - if genesis.IsZero() { + log.Info("Waiting for state to be initialized") + clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx) + if err != nil { + log.WithError(err).Error("initial-sync failed to receive startup event") + return + } + s.clock = clock + log.Info("Received state initialized event") + + gt := clock.GenesisTime() + if gt.IsZero() { log.Debug("Exiting Initial Sync Service") return } - if genesis.After(prysmTime.Now()) { - s.markSynced(genesis) - log.WithField("genesisTime", genesis).Info("Genesis time has not arrived - not syncing") + if gt.After(prysmTime.Now()) { + s.markSynced() + log.WithField("genesisTime", gt).Info("Genesis time has not arrived - not syncing") return } - currentSlot := slots.Since(genesis) + currentSlot := clock.CurrentSlot() if slots.ToEpoch(currentSlot) == 0 { - log.WithField("genesisTime", genesis).Info("Chain started within the last epoch - not syncing") - s.markSynced(genesis) + log.WithField("genesisTime", gt).Info("Chain started within the last epoch - not syncing") + s.markSynced() return } s.chainStarted.Set() @@ -98,18 +104,18 @@ func (s *Service) Start() { // Are we already in sync, or close to it? if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) { log.Info("Already synced to the current chain head") - s.markSynced(genesis) + s.markSynced() return } s.waitForMinimumPeers() - if err := s.roundRobinSync(genesis); err != nil { + if err := s.roundRobinSync(gt); err != nil { if errors.Is(s.ctx.Err(), context.Canceled) { return } panic(err) } log.Infof("Synced up to slot %d", s.cfg.Chain.HeadSlot()) - s.markSynced(genesis) + s.markSynced() } // Stop initial sync. @@ -181,48 +187,8 @@ func (s *Service) waitForMinimumPeers() { } } -// waitForStateInitialization makes sure that beacon node is ready to be accessed: it is either -// already properly configured or system waits up until state initialized event is triggered. 
-func (s *Service) waitForStateInitialization() { - // Wait for state to be initialized. - stateChannel := make(chan *feed.Event, 1) - stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - log.Info("Waiting for state to be initialized") - for { - select { - case event := <-stateChannel: - if event.Type == statefeed.Initialized { - data, ok := event.Data.(*statefeed.InitializedData) - if !ok { - log.Error("Event feed data is not type *statefeed.InitializedData") - continue - } - log.WithField("starttime", data.StartTime).Debug("Received state initialized event") - s.genesisChan <- data.StartTime - return - } - case <-s.ctx.Done(): - log.Debug("Context closed, exiting goroutine") - // Send a zero time in the event we are exiting. - s.genesisChan <- time.Time{} - return - case err := <-stateSub.Err(): - log.WithError(err).Error("Subscription to state notifier failed") - // Send a zero time in the event we are exiting. - s.genesisChan <- time.Time{} - return - } - } -} - // markSynced marks node as synced and notifies feed listeners. 
-func (s *Service) markSynced(genesis time.Time) { +func (s *Service) markSynced() { s.synced.Set() - s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Synced, - Data: &statefeed.SyncedData{ - StartTime: genesis, - }, - }) + close(s.cfg.InitialSyncComplete) } diff --git a/beacon-chain/sync/initial-sync/service_test.go b/beacon-chain/sync/initial-sync/service_test.go index d861b72292..4659a99eba 100644 --- a/beacon-chain/sync/initial-sync/service_test.go +++ b/beacon-chain/sync/initial-sync/service_test.go @@ -8,12 +8,10 @@ import ( "github.com/paulbellamy/ratecounter" "github.com/prysmaticlabs/prysm/v4/async/abool" - "github.com/prysmaticlabs/prysm/v4/async/event" mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -36,7 +34,7 @@ func TestService_InitStartStop(t *testing.T) { tests := []struct { name string assert func() - methodRuns func(fd *event.Feed) + setGenesis func() *startup.Clock chainService func() *mock.ChainService }{ { @@ -61,15 +59,9 @@ func TestService_InitStartStop(t *testing.T) { ValidatorsRoot: [32]byte{}, } }, - methodRuns: func(fd *event.Feed) { - // Send valid event. 
- fd.Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Unix(4113849600, 0), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) + setGenesis: func() *startup.Clock { + var vr [32]byte + return startup.NewClock(time.Unix(4113849600, 0), vr) }, assert: func() { assert.LogsContain(t, hook, "Genesis time has not arrived - not syncing") @@ -91,15 +83,9 @@ func TestService_InitStartStop(t *testing.T) { ValidatorsRoot: [32]byte{}, } }, - methodRuns: func(fd *event.Feed) { - // Send valid event. - fd.Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now().Add(-5 * time.Minute), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) + setGenesis: func() *startup.Clock { + var vr [32]byte + return startup.NewClock(time.Now().Add(-5*time.Minute), vr) }, assert: func() { assert.LogsContain(t, hook, "Chain started within the last epoch - not syncing") @@ -124,16 +110,10 @@ func TestService_InitStartStop(t *testing.T) { ValidatorsRoot: [32]byte{}, } }, - methodRuns: func(fd *event.Feed) { + setGenesis: func() *startup.Clock { futureSlot := primitives.Slot(27354) - // Send valid event. 
- fd.Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: makeGenesisTime(futureSlot), - GenesisValidatorsRoot: make([]byte, 32), - }, - }) + var vr [32]byte + return startup.NewClock(makeGenesisTime(futureSlot), vr) }, assert: func() { assert.LogsContain(t, hook, "Starting initial chain sync...") @@ -161,16 +141,18 @@ func TestService_InitStartStop(t *testing.T) { mc = tt.chainService() } // Initialize feed - notifier := &mock.MockStateNotifier{} + gs := startup.NewClockSynchronizer() s := NewService(ctx, &Config{ - P2P: p, - Chain: mc, - StateNotifier: notifier, + P2P: p, + Chain: mc, + ClockWaiter: gs, + StateNotifier: &mock.MockStateNotifier{}, + InitialSyncComplete: make(chan struct{}), }) time.Sleep(500 * time.Millisecond) assert.NotNil(t, s) - if tt.methodRuns != nil { - tt.methodRuns(notifier.StateFeed()) + if tt.setGenesis != nil { + require.NoError(t, gs.SetClock(tt.setGenesis())) } wg := &sync.WaitGroup{} @@ -197,10 +179,11 @@ func TestService_InitStartStop(t *testing.T) { func TestService_waitForStateInitialization(t *testing.T) { hook := logTest.NewGlobal() - newService := func(ctx context.Context, mc *mock.ChainService) *Service { + newService := func(ctx context.Context, mc *mock.ChainService) (*Service, *startup.ClockSynchronizer) { + cs := startup.NewClockSynchronizer() ctx, cancel := context.WithCancel(ctx) s := &Service{ - cfg: &Config{Chain: mc, StateNotifier: mc.StateNotifier()}, + cfg: &Config{Chain: mc, StateNotifier: mc.StateNotifier(), ClockWaiter: cs, InitialSyncComplete: make(chan struct{})}, ctx: ctx, cancel: cancel, synced: abool.New(), @@ -208,7 +191,7 @@ func TestService_waitForStateInitialization(t *testing.T) { counter: ratecounter.NewRateCounter(counterSeconds * time.Second), genesisChan: make(chan time.Time), } - return s + return s, cs } t.Run("no state and context close", func(t *testing.T) { @@ -216,13 +199,11 @@ func TestService_waitForStateInitialization(t *testing.T) { ctx, cancel 
:= context.WithCancel(context.Background()) defer cancel() - s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) + s, _ := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) wg := &sync.WaitGroup{} wg.Add(1) go func() { - go s.waitForStateInitialization() - currTime := <-s.genesisChan - assert.Equal(t, true, currTime.IsZero()) + s.Start() wg.Done() }() go func() { @@ -235,7 +216,7 @@ func TestService_waitForStateInitialization(t *testing.T) { t.Fatalf("Test should have exited by now, timed out") } assert.LogsContain(t, hook, "Waiting for state to be initialized") - assert.LogsContain(t, hook, "Context closed, exiting goroutine") + assert.LogsContain(t, hook, "initial-sync failed to receive startup event") assert.LogsDoNotContain(t, hook, "Subscription to state notifier failed") }) @@ -243,41 +224,30 @@ func TestService_waitForStateInitialization(t *testing.T) { defer hook.Reset() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) - expectedGenesisTime := time.Unix(358544700, 0) - var receivedGenesisTime time.Time + st, err := util.NewBeaconState() + require.NoError(t, err) + gt := time.Unix(int64(st.GenesisTime()), 0) + s, gs := newService(ctx, &mock.ChainService{State: st, Genesis: gt, ValidatorsRoot: [32]byte{}}) + + expectedGenesisTime := gt wg := &sync.WaitGroup{} wg.Add(1) go func() { - go s.waitForStateInitialization() - receivedGenesisTime = <-s.genesisChan - assert.Equal(t, false, receivedGenesisTime.IsZero()) + s.Start() wg.Done() }() + rg := func() time.Time { return gt.Add(time.Second * 12) } go func() { - time.AfterFunc(500*time.Millisecond, func() { - // Send invalid event at first. - s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.BlockProcessedData{}, - }) - // Send valid event. 
- s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: expectedGenesisTime, - GenesisValidatorsRoot: make([]byte, 32), - }, - }) + time.AfterFunc(200*time.Millisecond, func() { + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(expectedGenesisTime, vr, startup.WithNower(rg)))) }) }() if util.WaitTimeout(wg, time.Second*2) { t.Fatalf("Test should have exited by now, timed out") } - assert.Equal(t, expectedGenesisTime, receivedGenesisTime) - assert.LogsContain(t, hook, "Event feed data is not type *statefeed.InitializedData") assert.LogsContain(t, hook, "Waiting for state to be initialized") assert.LogsContain(t, hook, "Received state initialized event") assert.LogsDoNotContain(t, hook, "Context closed, exiting goroutine") @@ -287,29 +257,17 @@ func TestService_waitForStateInitialization(t *testing.T) { defer hook.Reset() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) + s, gs := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) // Initialize mock feed _ = s.cfg.StateNotifier.StateFeed() expectedGenesisTime := time.Now().Add(60 * time.Second) wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - s.waitForStateInitialization() - wg.Done() - }() - wg.Add(1) go func() { time.AfterFunc(500*time.Millisecond, func() { - // Send valid event. 
- s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: expectedGenesisTime, - GenesisValidatorsRoot: make([]byte, 32), - }, - }) + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(expectedGenesisTime, vr))) }) s.Start() wg.Done() @@ -326,11 +284,12 @@ func TestService_waitForStateInitialization(t *testing.T) { func TestService_markSynced(t *testing.T) { mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() s := NewService(ctx, &Config{ - Chain: mc, - StateNotifier: mc.StateNotifier(), + Chain: mc, + StateNotifier: mc.StateNotifier(), + InitialSyncComplete: make(chan struct{}), }) require.NotNil(t, s) assert.Equal(t, false, s.chainStarted.IsSet()) @@ -340,33 +299,16 @@ func TestService_markSynced(t *testing.T) { s.chainStarted.Set() assert.ErrorContains(t, "syncing", s.Status()) - expectedGenesisTime := time.Unix(358544700, 0) - var receivedGenesisTime time.Time - - stateChannel := make(chan *feed.Event, 1) - stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - - wg := &sync.WaitGroup{} - wg.Add(1) go func() { - select { - case stateEvent := <-stateChannel: - if stateEvent.Type == statefeed.Synced { - data, ok := stateEvent.Data.(*statefeed.SyncedData) - require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData") - receivedGenesisTime = data.StartTime - } - case <-s.ctx.Done(): - } - wg.Done() + s.markSynced() }() - s.markSynced(expectedGenesisTime) - if util.WaitTimeout(wg, time.Second*2) { - t.Fatalf("Test should have exited by now, timed out") + select { + case <-s.cfg.InitialSyncComplete: + case <-ctx.Done(): + require.NoError(t, ctx.Err()) // this is an error because it means initial sync complete failed to close } - assert.Equal(t, 
expectedGenesisTime, receivedGenesisTime) + assert.Equal(t, false, s.Syncing()) } @@ -459,9 +401,7 @@ func TestService_Initialized(t *testing.T) { } func TestService_Synced(t *testing.T) { - s := NewService(context.Background(), &Config{ - StateNotifier: &mock.MockStateNotifier{}, - }) + s := NewService(context.Background(), &Config{}) s.synced.UnSet() assert.Equal(t, false, s.Synced()) s.synced.Set() diff --git a/beacon-chain/sync/metrics.go b/beacon-chain/sync/metrics.go index 5e8a50096b..ed3e8d89ab 100644 --- a/beacon-chain/sync/metrics.go +++ b/beacon-chain/sync/metrics.go @@ -130,7 +130,7 @@ var ( func (s *Service) updateMetrics() { // do not update metrics if genesis time // has not been initialized - if s.cfg.chain.GenesisTime().IsZero() { + if s.cfg.clock.GenesisTime().IsZero() { return } // We update the dynamic subnet topics. @@ -138,8 +138,8 @@ func (s *Service) updateMetrics() { if err != nil { log.WithError(err).Debugf("Could not compute fork digest") } - indices := s.aggregatorSubnetIndices(s.cfg.chain.CurrentSlot()) - syncIndices := cache.SyncSubnetIDs.GetAllSubnets(slots.ToEpoch(s.cfg.chain.CurrentSlot())) + indices := s.aggregatorSubnetIndices(s.cfg.clock.CurrentSlot()) + syncIndices := cache.SyncSubnetIDs.GetAllSubnets(slots.ToEpoch(s.cfg.clock.CurrentSlot())) attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})] syncTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SyncCommitteeMessage{})] attTopic += s.cfg.p2p.Encoding().ProtocolSuffix() diff --git a/beacon-chain/sync/options.go b/beacon-chain/sync/options.go index 8c5a807be7..cd378eb81b 100644 --- a/beacon-chain/sync/options.go +++ b/beacon-chain/sync/options.go @@ -4,7 +4,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/event" blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" @@ -13,6 +12,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" ) @@ -88,13 +88,6 @@ func WithInitialSync(initialSync Checker) Option { } } -func WithStateNotifier(stateNotifier statefeed.Notifier) Option { - return func(s *Service) error { - s.cfg.stateNotifier = stateNotifier - return nil - } -} - func WithBlockNotifier(blockNotifier blockfeed.Notifier) Option { return func(s *Service) error { s.cfg.blockNotifier = blockNotifier @@ -136,3 +129,17 @@ func WithExecutionPayloadReconstructor(r execution.ExecutionPayloadReconstructor return nil } } + +func WithClockWaiter(cw startup.ClockWaiter) Option { + return func(s *Service) error { + s.clockWaiter = cw + return nil + } +} + +func WithInitialSyncComplete(c chan struct{}) Option { + return func(s *Service) error { + s.initialSyncComplete = c + return nil + } +} diff --git a/beacon-chain/sync/pending_attestations_queue.go b/beacon-chain/sync/pending_attestations_queue.go index 4a8eee38b0..2b57ab4eed 100644 --- a/beacon-chain/sync/pending_attestations_queue.go +++ b/beacon-chain/sync/pending_attestations_queue.go @@ -46,7 +46,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error { // Before a node processes pending attestations queue, it verifies // the attestations in the queue are still valid. Attestations will // be deleted from the queue if invalid (ie. getting staled from falling too many slots behind). 
- s.validatePendingAtts(ctx, s.cfg.chain.CurrentSlot()) + s.validatePendingAtts(ctx, s.cfg.clock.CurrentSlot()) s.pendingAttsLock.RLock() roots := make([][32]byte, 0, len(s.blkRootToPendingAtts)) @@ -76,7 +76,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error { } else { // Pending attestation's missing block has not arrived yet. log.WithFields(logrus.Fields{ - "currentSlot": s.cfg.chain.CurrentSlot(), + "currentSlot": s.cfg.clock.CurrentSlot(), "attSlot": attestations[0].Message.Aggregate.Data.Slot, "attCount": len(attestations), "blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])), diff --git a/beacon-chain/sync/pending_attestations_queue_test.go b/beacon-chain/sync/pending_attestations_queue_test.go index 13208ddec7..3ec67dcc1e 100644 --- a/beacon-chain/sync/pending_attestations_queue_test.go +++ b/beacon-chain/sync/pending_attestations_queue_test.go @@ -16,6 +16,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -42,8 +43,9 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) { p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected) p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{}) + chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}} r := &Service{ - cfg: &config{p2p: p1, beaconDB: db, chain: &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}}, + cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)}, blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), 
chainStarted: abool.New(), } @@ -105,20 +107,22 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix()))) + chain := &mock.ChainService{Genesis: time.Now(), + State: beaconState, + FinalizedCheckPoint: ðpb.Checkpoint{ + Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, + Epoch: 0, + }, + } ctx, cancel := context.WithCancel(context.Background()) r := &Service{ ctx: ctx, cfg: &config{ p2p: p1, beaconDB: db, - chain: &mock.ChainService{Genesis: time.Now(), - State: beaconState, - FinalizedCheckPoint: ðpb.Checkpoint{ - Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, - Epoch: 0, - }, - }, - attPool: attestations.NewPool(), + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + attPool: attestations.NewPool(), }, blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), seenUnAggregatedAttestationCache: lruwrpr.New(10), @@ -147,14 +151,16 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) { p1 := p2ptest.NewTestP2P(t) s, _ := util.DeterministicGenesisState(t, 256) + chain := &mock.ChainService{ + State: s, + Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{Root: make([]byte, 32)}} r := &Service{ cfg: &config{ p2p: p1, beaconDB: db, - chain: &mock.ChainService{ - State: s, - Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{Root: make([]byte, 32)}}, - attPool: attestations.NewPool(), + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + attPool: attestations.NewPool(), }, blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), } @@ -224,18 +230,20 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) { require.NoError(t, s.SetGenesisTime(uint64(time.Now().Unix()))) ctx, cancel := context.WithCancel(context.Background()) + chain2 := &mock.ChainService{Genesis: time.Now(), + State: s, + 
FinalizedCheckPoint: ðpb.Checkpoint{ + Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, + Epoch: 0, + }} r = &Service{ ctx: ctx, cfg: &config{ p2p: p1, beaconDB: db, - chain: &mock.ChainService{Genesis: time.Now(), - State: s, - FinalizedCheckPoint: ðpb.Checkpoint{ - Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, - Epoch: 0, - }}, - attPool: attestations.NewPool(), + chain: chain2, + clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot), + attPool: attestations.NewPool(), }, blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), seenUnAggregatedAttestationCache: lruwrpr.New(10), @@ -305,20 +313,22 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix()))) + chain := &mock.ChainService{Genesis: time.Now(), + DB: db, + State: beaconState, + FinalizedCheckPoint: ðpb.Checkpoint{ + Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, + Epoch: 0, + }} ctx, cancel := context.WithCancel(context.Background()) r := &Service{ ctx: ctx, cfg: &config{ p2p: p1, beaconDB: db, - chain: &mock.ChainService{Genesis: time.Now(), - DB: db, - State: beaconState, - FinalizedCheckPoint: ðpb.Checkpoint{ - Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, - Epoch: 0, - }}, - attPool: attestations.NewPool(), + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + attPool: attestations.NewPool(), }, blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), seenAggregatedAttestationCache: lruwrpr.New(10), diff --git a/beacon-chain/sync/pending_blocks_queue.go b/beacon-chain/sync/pending_blocks_queue.go index 13437b6350..b07fc6ee7b 100644 --- a/beacon-chain/sync/pending_blocks_queue.go +++ b/beacon-chain/sync/pending_blocks_queue.go @@ -37,7 +37,7 @@ func (s *Service) processPendingBlocksQueue() { locker := new(sync.Mutex) async.RunEvery(s.ctx, processPendingBlocksPeriod, func() { // Don't 
process the pending blocks if genesis time has not been set. The chain is not ready. - if !s.isGenesisTimeSet() { + if !s.chainIsStarted() { return } locker.Lock() @@ -69,7 +69,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { for _, slot := range ss { // process the blocks during their respective slot. // otherwise wait for the right slot to process the block. - if slot > s.cfg.chain.CurrentSlot() { + if slot > s.cfg.clock.CurrentSlot() { continue } @@ -445,12 +445,6 @@ func (s *Service) addPendingBlockToCache(b interfaces.ReadOnlySignedBeaconBlock) return nil } -// Returns true if the genesis time has been set in chain service. -// Without the genesis time, the chain does not start. -func (s *Service) isGenesisTimeSet() bool { - return s.cfg.chain.GenesisTime().Unix() != 0 -} - // This converts input string to slot. func cacheKeyToSlot(s string) primitives.Slot { b := []byte(s) diff --git a/beacon-chain/sync/pending_blocks_queue_test.go b/beacon-chain/sync/pending_blocks_queue_test.go index 7e417d5d5e..2fce2b76d9 100644 --- a/beacon-chain/sync/pending_blocks_queue_test.go +++ b/beacon-chain/sync/pending_blocks_queue_test.go @@ -20,6 +20,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -51,6 +52,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { Epoch: 0, }, }, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), stateGen: stategen.New(db, doublylinkedtree.New()), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), @@ -123,6 +125,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t 
*testing.T) { Epoch: 0, }, }, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), stateGen: stategen.New(db, doublylinkedtree.New()), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), @@ -196,6 +199,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) }, ReceiveBlockMockErr: execution.ErrHTTPTimeout, }, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), stateGen: stategen.New(db, fcs), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), @@ -323,6 +327,7 @@ func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) { Epoch: 0, }, }, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), stateGen: stategen.New(db, doublylinkedtree.New()), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), @@ -391,6 +396,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin Root: make([]byte, 32), }, }, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), stateGen: stategen.New(db, doublylinkedtree.New()), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), @@ -583,18 +589,20 @@ func TestService_BatchRootRequest(t *testing.T) { p1.Connect(p2) assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") + chain := &mock.ChainService{ + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 1, + Root: make([]byte, 32), + }, + ValidatorsRoot: [32]byte{}, + Genesis: time.Now(), + } r := &Service{ cfg: &config{ p2p: p1, beaconDB: db, - chain: &mock.ChainService{ - FinalizedCheckPoint: ðpb.Checkpoint{ - Epoch: 1, - Root: make([]byte, 32), - }, - ValidatorsRoot: [32]byte{}, - Genesis: time.Now(), - }, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), seenPendingBlocks: make(map[[32]byte]bool), @@ -713,6 +721,7 @@ func TestService_ProcessPendingBlockOnCorrectSlot(t *testing.T) { p2p: p1, beaconDB: db, chain: &mockChain, + clock: 
startup.NewClock(mockChain.Genesis, mockChain.ValidatorsRoot), stateGen: stategen.New(db, fcs), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), diff --git a/beacon-chain/sync/rpc.go b/beacon-chain/sync/rpc.go index c69c7eaf59..cf95452afe 100644 --- a/beacon-chain/sync/rpc.go +++ b/beacon-chain/sync/rpc.go @@ -33,7 +33,7 @@ type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error // registerRPCHandlers for p2p RPC. func (s *Service) registerRPCHandlers() { - currEpoch := slots.ToEpoch(s.cfg.chain.CurrentSlot()) + currEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot()) // Register V2 handlers if we are past altair fork epoch. if currEpoch >= params.BeaconConfig().AltairForkEpoch { s.registerRPC( diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range.go b/beacon-chain/sync/rpc_beacon_blocks_by_range.go index 61381ef451..a81a077214 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range.go @@ -228,7 +228,7 @@ func (s *Service) validateRangeRequest(r *pb.BeaconBlocksByRangeRequest) error { // Add a buffer for possible large range requests from nodes syncing close to the // head of the chain. 
buffer := rangeLimit * 2 - highestExpectedSlot := s.cfg.chain.CurrentSlot().Add(uint64(buffer)) + highestExpectedSlot := s.cfg.clock.CurrentSlot().Add(uint64(buffer)) // Ensure all request params are within appropriate bounds if count == 0 || count > maxRequestBlocks { diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go index 074034d551..2472308398 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go @@ -20,6 +20,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -63,7 +64,13 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) { } // Start service with 160 as allowed blocks capacity (and almost zero capacity recovery). - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + r := &Service{ + cfg: &config{ + p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), + }, + rateLimiter: newRateLimiter(p1), + } pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false) @@ -125,7 +132,8 @@ func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) { require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot)) // Start service with 160 as allowed blocks capacity (and almost zero capacity recovery). 
- r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false) @@ -239,6 +247,7 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) { beaconDB: d, chain: &chainMock.ChainService{}, executionPayloadReconstructor: mockEngine, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), }, rateLimiter: newRateLimiter(p1), } @@ -309,7 +318,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) { } // Start service with 160 as allowed blocks capacity (and almost zero capacity recovery). - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false) @@ -374,7 +384,7 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) { prevRoot = rt } - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: startup.NewClock(time.Unix(0, 0), [32]byte{})}, rateLimiter: newRateLimiter(p1)} pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false) @@ 
-465,7 +475,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") capacity := int64(flags.Get().BlockBatchLimit * 3) - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) @@ -491,7 +502,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") capacity := int64(flags.Get().BlockBatchLimit * 3) - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) @@ -521,7 +533,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor) - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1) topic := string(pcl) r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false) @@ -552,11 +565,13 @@ func 
TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) { slotsSinceGenesis := primitives.Slot(1000) offset := int64(slotsSinceGenesis.Mul(params.BeaconConfig().SecondsPerSlot)) + chain := &chainMock.ChainService{ + Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)), + } r := &Service{ cfg: &config{ - chain: &chainMock.ChainService{ - Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)), - }, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, } @@ -720,7 +735,8 @@ func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) { p1.Connect(p2) assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false) req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 448, @@ -888,7 +904,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { p1.Connect(p2) assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false) req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 1, @@ -919,7 +936,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { p1.Connect(p2) 
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false) req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 1, @@ -954,7 +972,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { p1.Connect(p2) assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false) req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 1, @@ -989,7 +1008,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { p1.Connect(p2) assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false) req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 1, @@ -1029,7 +1049,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { p1.Connect(p2) assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected 
peers to be connected") - r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)} + clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) + r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)} r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false) req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 1, diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root.go b/beacon-chain/sync/rpc_beacon_blocks_by_root.go index a7a5822188..1eaa461635 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root.go @@ -19,7 +19,7 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots ctx, cancel := context.WithTimeout(ctx, respTimeout) defer cancel() - _, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.chain, s.cfg.p2p, id, blockRoots, func(blk interfaces.ReadOnlySignedBeaconBlock) error { + _, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, id, blockRoots, func(blk interfaces.ReadOnlySignedBeaconBlock) error { blkRoot, err := blk.Block().HashTreeRoot() if err != nil { return err diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go index b6b45732fa..263f44cfaf 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go @@ -19,6 +19,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -50,7 +51,7 @@ 
func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) { blkRoots = append(blkRoots, root) } - r := &Service{cfg: &config{p2p: p1, beaconDB: d}, rateLimiter: newRateLimiter(p1)} + r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: startup.NewClock(time.Unix(0, 0), [32]byte{})}, rateLimiter: newRateLimiter(p1)} r.cfg.chain = &mock.ChainService{ValidatorsRoot: [32]byte{}} pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1) topic := string(pcl) @@ -148,8 +149,9 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload(t *testi p2p: p1, beaconDB: d, executionPayloadReconstructor: mockEngine, + chain: &mock.ChainService{ValidatorsRoot: [32]byte{}}, + clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), }, rateLimiter: newRateLimiter(p1)} - r.cfg.chain = &mock.ChainService{ValidatorsRoot: [32]byte{}} pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1) topic := string(pcl) r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false) @@ -204,16 +206,18 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) { expectedRoots := p2pTypes.BeaconBlockByRootsReq{blockBRoot, blockARoot} + chain := &mock.ChainService{ + State: genesisState, + FinalizedCheckPoint: finalizedCheckpt, + Root: blockARoot[:], + Genesis: time.Now(), + ValidatorsRoot: [32]byte{}, + } r := &Service{ cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: genesisState, - FinalizedCheckPoint: finalizedCheckpt, - Root: blockARoot[:], - Genesis: time.Now(), - ValidatorsRoot: [32]byte{}, - }, + p2p: p1, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), seenPendingBlocks: make(map[[32]byte]bool), diff --git a/beacon-chain/sync/rpc_chunked_response.go b/beacon-chain/sync/rpc_chunked_response.go index 51051bfdb6..ed18224a59 100644 --- a/beacon-chain/sync/rpc_chunked_response.go +++ b/beacon-chain/sync/rpc_chunked_response.go @@ -20,42 +20,48 @@ import ( // 
response_chunk ::= | | | func (s *Service) chunkBlockWriter(stream libp2pcore.Stream, blk interfaces.ReadOnlySignedBeaconBlock) error { SetStreamWriteDeadline(stream, defaultWriteDuration) - return WriteBlockChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), blk) + return WriteBlockChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), blk) } // WriteBlockChunk writes block chunk object to stream. // response_chunk ::= | | | -func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, blk interfaces.ReadOnlySignedBeaconBlock) error { +func WriteBlockChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, blk interfaces.ReadOnlySignedBeaconBlock) error { if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil { return err } var obtainedCtx []byte + valRoot := tor.GenesisValidatorsRoot() switch blk.Version() { case version.Phase0: - valRoot := chain.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().GenesisEpoch, valRoot[:]) if err != nil { return err } obtainedCtx = digest[:] case version.Altair: - valRoot := chain.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, valRoot[:]) if err != nil { return err } obtainedCtx = digest[:] case version.Bellatrix: - valRoot := chain.GenesisValidatorsRoot() digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, valRoot[:]) if err != nil { return err } obtainedCtx = digest[:] + case version.Capella: + digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, valRoot[:]) + if err != nil { + return err + } + obtainedCtx = digest[:] + default: + return errors.Wrapf(ErrUnrecognizedVersion, "block version %d is not recognized", blk.Version()) } - if err := writeContextToStream(obtainedCtx, stream, chain); err != nil { + if err := writeContextToStream(obtainedCtx, stream); err != nil { return err } 
_, err := encoding.EncodeWithMaxLength(stream, blk) @@ -64,18 +70,18 @@ func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher // ReadChunkedBlock handles each response chunk that is sent by the // peer and converts it into a beacon block. -func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider, isFirstChunk bool) (interfaces.ReadOnlySignedBeaconBlock, error) { +func ReadChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOracle, p2p p2p.EncodingProvider, isFirstChunk bool) (interfaces.ReadOnlySignedBeaconBlock, error) { // Handle deadlines differently for first chunk if isFirstChunk { - return readFirstChunkedBlock(stream, chain, p2p) + return readFirstChunkedBlock(stream, tor, p2p) } - return readResponseChunk(stream, chain, p2p) + return readResponseChunk(stream, tor, p2p) } // readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to // it. -func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) { +func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOracle, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) { code, errMsg, err := ReadStatusCode(stream, p2p.Encoding()) if err != nil { return nil, err @@ -83,11 +89,11 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetche if code != 0 { return nil, errors.New(errMsg) } - rpcCtx, err := readContextFromStream(stream, chain) + rpcCtx, err := readContextFromStream(stream) if err != nil { return nil, err } - blk, err := extractBlockDataType(rpcCtx, chain) + blk, err := extractBlockDataType(rpcCtx, tor) if err != nil { return nil, err } @@ -97,7 +103,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetche // readResponseChunk reads the response from the stream and decodes it into the // provided 
message type. -func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) { +func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) { SetStreamReadDeadline(stream, respTimeout) code, errMsg, err := readStatusCodeNoDeadline(stream, p2p.Encoding()) if err != nil { @@ -107,11 +113,11 @@ func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p return nil, errors.New(errMsg) } // No-op for now with the rpc context. - rpcCtx, err := readContextFromStream(stream, chain) + rpcCtx, err := readContextFromStream(stream) if err != nil { return nil, err } - blk, err := extractBlockDataType(rpcCtx, chain) + blk, err := extractBlockDataType(rpcCtx, tor) if err != nil { return nil, err } @@ -119,7 +125,7 @@ func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p return blk, err } -func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfaces.ReadOnlySignedBeaconBlock, error) { +func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) { if len(digest) == 0 { bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] if !ok { @@ -130,7 +136,7 @@ func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfac if len(digest) != forkDigestLength { return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest)) } - vRoot := chain.GenesisValidatorsRoot() + vRoot := tor.GenesisValidatorsRoot() for k, blkFunc := range types.BlockMap { rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:]) if err != nil { @@ -140,5 +146,5 @@ func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfac return blkFunc() } } - return nil, errors.New("no valid digest 
matched") + return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot()) } diff --git a/beacon-chain/sync/rpc_goodbye.go b/beacon-chain/sync/rpc_goodbye.go index 7125f02c75..3737505ed2 100644 --- a/beacon-chain/sync/rpc_goodbye.go +++ b/beacon-chain/sync/rpc_goodbye.go @@ -96,7 +96,7 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code p2ptypes.RPCGoodb ctx, cancel := context.WithTimeout(ctx, respTimeout) defer cancel() - topic, err := p2p.TopicFromMessage(p2p.GoodbyeMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot())) + topic, err := p2p.TopicFromMessage(p2p.GoodbyeMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot())) if err != nil { return err } diff --git a/beacon-chain/sync/rpc_goodbye_test.go b/beacon-chain/sync/rpc_goodbye_test.go index ef464971b5..866f399d9d 100644 --- a/beacon-chain/sync/rpc_goodbye_test.go +++ b/beacon-chain/sync/rpc_goodbye_test.go @@ -12,6 +12,7 @@ import ( db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket" @@ -153,11 +154,13 @@ func TestSendGoodbye_SendsMessage(t *testing.T) { // Set up a head state in the database with data we expect. 
d := db.SetupDB(t) + chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()} r := &Service{ cfg: &config{ beaconDB: d, p2p: p1, - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, rateLimiter: newRateLimiter(p1), } @@ -198,11 +201,13 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) { // Set up a head state in the database with data we expect. d := db.SetupDB(t) + chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} r := &Service{ cfg: &config{ beaconDB: d, p2p: p1, - chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, rateLimiter: newRateLimiter(p1), } diff --git a/beacon-chain/sync/rpc_metadata.go b/beacon-chain/sync/rpc_metadata.go index 01954d19c0..e7278cf258 100644 --- a/beacon-chain/sync/rpc_metadata.go +++ b/beacon-chain/sync/rpc_metadata.go @@ -89,7 +89,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata ctx, cancel := context.WithTimeout(ctx, respTimeout) defer cancel() - topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot())) + topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot())) if err != nil { return nil, err } @@ -107,12 +107,12 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer()) return nil, errors.New(errMsg) } - valRoot := s.cfg.chain.GenesisValidatorsRoot() - rpcCtx, err := forks.ForkDigestFromEpoch(slots.ToEpoch(s.cfg.chain.CurrentSlot()), valRoot[:]) + valRoot := s.cfg.clock.GenesisValidatorsRoot() + rpcCtx, err := forks.ForkDigestFromEpoch(slots.ToEpoch(s.cfg.clock.CurrentSlot()), valRoot[:]) if err != nil { return nil, err } - msg, err := 
extractMetaDataType(rpcCtx[:], s.cfg.chain) + msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock) if err != nil { return nil, err } @@ -134,7 +134,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata return msg, nil } -func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (metadata.Metadata, error) { +func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) { if len(digest) == 0 { mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] if !ok { @@ -145,7 +145,7 @@ func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (meta if len(digest) != forkDigestLength { return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest)) } - vRoot := chain.GenesisValidatorsRoot() + vRoot := tor.GenesisValidatorsRoot() for k, mdFunc := range types.MetaDataMap { rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:]) if err != nil { @@ -155,5 +155,5 @@ func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (meta return mdFunc(), nil } } - return nil, errors.New("no valid digest matched") + return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot()) } diff --git a/beacon-chain/sync/rpc_metadata_test.go b/beacon-chain/sync/rpc_metadata_test.go index 54e3945b9a..e0edf3bdf0 100644 --- a/beacon-chain/sync/rpc_metadata_test.go +++ b/beacon-chain/sync/rpc_metadata_test.go @@ -15,6 +15,7 @@ import ( db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper" 
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket" @@ -90,12 +91,14 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) { }) // Set up a head state in the database with data we expect. + chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} d := db.SetupDB(t) r := &Service{ cfg: &config{ beaconDB: d, p2p: p1, - chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, rateLimiter: newRateLimiter(p1), } @@ -158,20 +161,24 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) { // Set up a head state in the database with data we expect. d := db.SetupDB(t) + chain := &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}} r := &Service{ cfg: &config{ beaconDB: d, p2p: p1, - chain: &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, rateLimiter: newRateLimiter(p1), } + chain2 := &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}} r2 := &Service{ cfg: &config{ beaconDB: d, p2p: p2, - chain: &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}}, + chain: chain2, + clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot), }, rateLimiter: newRateLimiter(p2), } @@ -236,7 +243,7 @@ func TestExtractMetaDataType(t *testing.T) { type args struct { digest []byte - chain blockchain.ChainInfoFetcher + clock blockchain.TemporalOracle } tests := []struct { name string @@ -248,7 +255,7 @@ func TestExtractMetaDataType(t *testing.T) { name: "no digest", args: args{ digest: []byte{}, - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}}, + clock: startup.NewClock(time.Now(), [32]byte{}), }, want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}), wantErr: false, @@ -257,7 +264,7 @@ func 
TestExtractMetaDataType(t *testing.T) { name: "invalid digest", args: args{ digest: []byte{0x00, 0x01}, - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}}, + clock: startup.NewClock(time.Now(), [32]byte{}), }, want: nil, wantErr: true, @@ -266,7 +273,7 @@ func TestExtractMetaDataType(t *testing.T) { name: "non existent digest", args: args{ digest: []byte{0x00, 0x01, 0x02, 0x03}, - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}}, + clock: startup.NewClock(time.Now(), [32]byte{}), }, want: nil, wantErr: true, @@ -275,7 +282,7 @@ func TestExtractMetaDataType(t *testing.T) { name: "genesis fork version", args: args{ digest: genDigest[:], - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}}, + clock: startup.NewClock(time.Now(), [32]byte{}), }, want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}), wantErr: false, @@ -284,7 +291,7 @@ func TestExtractMetaDataType(t *testing.T) { name: "altair fork version", args: args{ digest: altairDigest[:], - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}}, + clock: startup.NewClock(time.Now(), [32]byte{}), }, want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}), wantErr: false, @@ -292,7 +299,7 @@ func TestExtractMetaDataType(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := extractMetaDataType(tt.args.digest, tt.args.chain) + got, err := extractMetaDataType(tt.args.digest, tt.args.clock) if (err != nil) != tt.wantErr { t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/beacon-chain/sync/rpc_ping.go b/beacon-chain/sync/rpc_ping.go index 2044acf228..87af6cdc72 100644 --- a/beacon-chain/sync/rpc_ping.go +++ b/beacon-chain/sync/rpc_ping.go @@ -78,7 +78,7 @@ func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error { defer cancel() metadataSeq := primitives.SSZUint64(s.cfg.p2p.MetadataSeq()) - topic, err := p2p.TopicFromMessage(p2p.PingMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot())) + topic, err := 
p2p.TopicFromMessage(p2p.PingMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot())) if err != nil { return err } diff --git a/beacon-chain/sync/rpc_ping_test.go b/beacon-chain/sync/rpc_ping_test.go index fa5bdac452..7c7fbd86c8 100644 --- a/beacon-chain/sync/rpc_ping_test.go +++ b/beacon-chain/sync/rpc_ping_test.go @@ -14,6 +14,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper" leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket" @@ -97,11 +98,13 @@ func TestPingRPCHandler_SendsPing(t *testing.T) { // Set up a head state in the database with data we expect. d := db.SetupDB(t) + chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()} r := &Service{ cfg: &config{ beaconDB: d, p2p: p1, - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, rateLimiter: newRateLimiter(p1), } @@ -112,11 +115,13 @@ func TestPingRPCHandler_SendsPing(t *testing.T) { p2.Peers().Add(new(enr.Record), p1.BHost.ID(), p1.BHost.Addrs()[0], network.DirUnknown) p2.Peers().SetMetadata(p1.BHost.ID(), p1.LocalMetadata) + chain2 := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()} r2 := &Service{ cfg: &config{ beaconDB: d, p2p: p2, - chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}, + chain: chain2, + clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot), }, rateLimiter: newRateLimiter(p2), } diff --git a/beacon-chain/sync/rpc_send_request.go b/beacon-chain/sync/rpc_send_request.go index 3361a8fb4e..df77bea9ff 100644 --- a/beacon-chain/sync/rpc_send_request.go +++ 
b/beacon-chain/sync/rpc_send_request.go @@ -25,10 +25,10 @@ type BeaconBlockProcessor func(block interfaces.ReadOnlySignedBeaconBlock) error // SendBeaconBlocksByRangeRequest sends BeaconBlocksByRange and returns fetched blocks, if any. func SendBeaconBlocksByRangeRequest( - ctx context.Context, chain blockchain.ForkFetcher, p2pProvider p2p.SenderEncoder, pid peer.ID, + ctx context.Context, tor blockchain.TemporalOracle, p2pProvider p2p.SenderEncoder, pid peer.ID, req *pb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor, ) ([]interfaces.ReadOnlySignedBeaconBlock, error) { - topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(chain.CurrentSlot())) + topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(tor.CurrentSlot())) if err != nil { return nil, err } @@ -50,7 +50,7 @@ func SendBeaconBlocksByRangeRequest( var prevSlot primitives.Slot for i := uint64(0); ; i++ { isFirstChunk := i == 0 - blk, err := ReadChunkedBlock(stream, chain, p2pProvider, isFirstChunk) + blk, err := ReadChunkedBlock(stream, tor, p2pProvider, isFirstChunk) if errors.Is(err, io.EOF) { break } @@ -87,10 +87,10 @@ func SendBeaconBlocksByRangeRequest( // SendBeaconBlocksByRootRequest sends BeaconBlocksByRoot and returns fetched blocks, if any. 
func SendBeaconBlocksByRootRequest( - ctx context.Context, chain blockchain.ChainInfoFetcher, p2pProvider p2p.P2P, pid peer.ID, + ctx context.Context, clock blockchain.TemporalOracle, p2pProvider p2p.P2P, pid peer.ID, req *p2ptypes.BeaconBlockByRootsReq, blockProcessor BeaconBlockProcessor, ) ([]interfaces.ReadOnlySignedBeaconBlock, error) { - topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRootsMessageName, slots.ToEpoch(chain.CurrentSlot())) + topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRootsMessageName, slots.ToEpoch(clock.CurrentSlot())) if err != nil { return nil, err } @@ -115,7 +115,7 @@ func SendBeaconBlocksByRootRequest( break } isFirstChunk := i == 0 - blk, err := ReadChunkedBlock(stream, chain, p2pProvider, isFirstChunk) + blk, err := ReadChunkedBlock(stream, clock, p2pProvider, isFirstChunk) if errors.Is(err, io.EOF) { break } diff --git a/beacon-chain/sync/rpc_send_request_test.go b/beacon-chain/sync/rpc_send_request_test.go index 3fad05de8c..c086514536 100644 --- a/beacon-chain/sync/rpc_send_request_test.go +++ b/beacon-chain/sync/rpc_send_request_test.go @@ -9,10 +9,10 @@ import ( "time" "github.com/libp2p/go-libp2p/core/network" - mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" @@ -35,8 +35,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { p1.Connect(bogusPeer) req := ðpb.BeaconBlocksByRangeRequest{} - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - _, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, bogusPeer.PeerID(), req, nil) + _, err 
:= SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, bogusPeer.PeerID(), req, nil) assert.ErrorContains(t, "protocols not supported", err) }) @@ -83,10 +82,9 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { if uint64(i) >= uint64(len(knownBlocks)) { break } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[i]) require.NoError(t, err) - err = WriteBlockChunk(stream, chain, p2pProvider.Encoding(), wsb) + err = WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p2pProvider.Encoding(), wsb) if err != nil && err.Error() != network.ErrReset.Error() { require.NoError(t, err) } @@ -105,8 +103,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Count: 128, Step: 1, } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.NoError(t, err) assert.Equal(t, 128, len(blocks)) }) @@ -124,8 +121,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Step: 1, } blocksFromProcessor := make([]interfaces.ReadOnlySignedBeaconBlock, 0) - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { + blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { blocksFromProcessor = append(blocksFromProcessor, block) return nil }) @@ -147,8 +143,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Step: 1, } errFromProcessor := errors.New("processor error") - chain := 
&mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - _, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { + _, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { return errFromProcessor }) assert.ErrorContains(t, errFromProcessor.Error(), err) @@ -166,8 +161,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Count: 128, Step: 1, } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.NoError(t, err) assert.Equal(t, 128, len(blocks)) @@ -178,7 +172,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { cfg.MaxRequestBlocks = maxRequestBlocks params.OverrideBeaconNetworkConfig(cfg) }() - blocks, err = SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { + blocks, err = SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { // Since ssz checks the boundaries, and doesn't normally allow to send requests bigger than // the max request size, we are updating max request size dynamically. Even when updated dynamically, // no more than max request size of blocks is expected on return. 
@@ -209,8 +203,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Count: 128, Step: 1, } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.ErrorContains(t, expectedErr.Error(), err) assert.Equal(t, 0, len(blocks)) }) @@ -238,9 +231,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { if uint64(i) >= uint64(len(knownBlocks)) { break } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[i]) - err = WriteBlockChunk(stream, chain, p2.Encoding(), wsb) + err = WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p2.Encoding(), wsb) if err != nil && err.Error() != network.ErrReset.Error() { require.NoError(t, err) } @@ -252,8 +244,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Count: 128, Step: 1, } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.ErrorContains(t, ErrInvalidFetchedData.Error(), err) assert.Equal(t, 0, len(blocks)) @@ -282,10 +273,9 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { if uint64(i) >= uint64(len(knownBlocks)) { break } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[i]) require.NoError(t, err) - err = WriteBlockChunk(stream, chain, p2.Encoding(), wsb) + err = WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p2.Encoding(), wsb) if err != nil && err.Error() != network.ErrReset.Error() { 
require.NoError(t, err) } @@ -297,8 +287,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { Count: 128, Step: 10, } - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.ErrorContains(t, ErrInvalidFetchedData.Error(), err) assert.Equal(t, 0, len(blocks)) @@ -327,8 +316,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { p1.Connect(bogusPeer) req := &p2pTypes.BeaconBlockByRootsReq{} - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - _, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, bogusPeer.PeerID(), req, nil) + _, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, bogusPeer.PeerID(), req, nil) assert.ErrorContains(t, "protocols not supported", err) }) @@ -377,8 +365,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { p2.SetStreamHandler(pcl, knownBlocksProvider(p2, nil)) req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]} - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.NoError(t, err) assert.Equal(t, 2, len(blocks)) }) @@ -392,8 +379,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { // No error from block processor. 
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]} blocksFromProcessor := make([]interfaces.ReadOnlySignedBeaconBlock, 0) - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { + blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { blocksFromProcessor = append(blocksFromProcessor, block) return nil }) @@ -411,8 +397,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { // Send error from block processor. req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]} errFromProcessor := errors.New("processor error") - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - _, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { + _, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { return errFromProcessor }) assert.ErrorContains(t, errFromProcessor.Error(), err) @@ -426,8 +411,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { // No cap on max roots. 
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]} - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil) + clock := startup.NewClock(time.Now(), [32]byte{}) + blocks, err := SendBeaconBlocksByRootRequest(ctx, clock, p1, p2.PeerID(), req, nil) assert.NoError(t, err) assert.Equal(t, 4, len(blocks)) @@ -438,7 +423,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { cfg.MaxRequestBlocks = maxRequestBlocks params.OverrideBeaconNetworkConfig(cfg) }() - blocks, err = SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { + blocks, err = SendBeaconBlocksByRootRequest(ctx, clock, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error { // Since ssz checks the boundaries, and doesn't normally allow to send requests bigger than // the max request size, we are updating max request size dynamically. Even when updated dynamically, // no more than max request size of blocks is expected on return. 
@@ -465,8 +450,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { })) req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]} - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.ErrorContains(t, expectedErr.Error(), err) assert.Equal(t, 0, len(blocks)) }) @@ -486,8 +470,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { })) req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]} - chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil) + blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil) assert.NoError(t, err) assert.Equal(t, 3, len(blocks)) }) diff --git a/beacon-chain/sync/rpc_status.go b/beacon-chain/sync/rpc_status.go index 92f1598c00..7c7665aa29 100644 --- a/beacon-chain/sync/rpc_status.go +++ b/beacon-chain/sync/rpc_status.go @@ -93,7 +93,7 @@ func (s *Service) resyncIfBehind() { // Check if the current node is more than 1 epoch behind. if highestEpoch > (syncedEpoch + 1) { log.WithFields(logrus.Fields{ - "currentEpoch": slots.ToEpoch(s.cfg.chain.CurrentSlot()), + "currentEpoch": slots.ToEpoch(s.cfg.clock.CurrentSlot()), "syncedEpoch": syncedEpoch, "peersEpoch": highestEpoch, }).Info("Fallen behind peers; reverting to initial sync to catch up") @@ -110,7 +110,7 @@ func (s *Service) resyncIfBehind() { // shouldReSync returns true if the node is not syncing and falls behind two epochs. 
func (s *Service) shouldReSync() bool { syncedEpoch := slots.ToEpoch(s.cfg.chain.HeadSlot()) - currentEpoch := slots.ToEpoch(s.cfg.chain.CurrentSlot()) + currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot()) prevEpoch := primitives.Epoch(0) if currentEpoch > 1 { prevEpoch = currentEpoch - 1 @@ -140,7 +140,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error { HeadRoot: headRoot, HeadSlot: s.cfg.chain.HeadSlot(), } - topic, err := p2p.TopicFromMessage(p2p.StatusMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot())) + topic, err := p2p.TopicFromMessage(p2p.StatusMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot())) if err != nil { return err } @@ -288,7 +288,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err if !bytes.Equal(forkDigest[:], msg.ForkDigest) { return p2ptypes.ErrWrongForkDigestVersion } - genesis := s.cfg.chain.GenesisTime() + genesis := s.cfg.clock.GenesisTime() cp := s.cfg.chain.FinalizedCheckpt() finalizedEpoch := cp.Epoch maxEpoch := slots.EpochsSinceGenesis(genesis) diff --git a/beacon-chain/sync/rpc_status_test.go b/beacon-chain/sync/rpc_status_test.go index 37d86d2141..108c2eb912 100644 --- a/beacon-chain/sync/rpc_status_test.go +++ b/beacon-chain/sync/rpc_status_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/protocol" + "github.com/prysmaticlabs/prysm/v4/async/abool" mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv" @@ -17,6 +18,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" state_native 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -41,6 +43,8 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) { assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") root := [32]byte{'C'} + gt := time.Now() + vr := [32]byte{'A'} r := &Service{ cfg: &config{ p2p: p1, @@ -53,10 +57,11 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) { Epoch: 0, Root: root[:], }, - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, + Genesis: gt, + ValidatorsRoot: vr, Root: make([]byte, 32), }, + clock: startup.NewClock(gt, vr), }, rateLimiter: newRateLimiter(p1), } @@ -109,6 +114,8 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) { assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") var root [32]byte + gt := time.Now() + vr := [32]byte{'A'} r := &Service{ cfg: &config{ p2p: p1, @@ -121,10 +128,11 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) { Epoch: 0, Root: params.BeaconConfig().ZeroHash[:], }, - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, + Genesis: gt, + ValidatorsRoot: vr, Root: make([]byte, 32), }, + clock: startup.NewClock(gt, vr), }, rateLimiter: newRateLimiter(p1), } @@ -187,6 +195,8 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) { totalSec := int64(params.BeaconConfig().SlotsPerEpoch.Mul(5 * params.BeaconConfig().SecondsPerSlot)) genTime := time.Now().Unix() - totalSec + gt := time.Unix(genTime, 0) + vr := [32]byte{'A'} r := &Service{ cfg: &config{ p2p: p1, @@ -198,12 +208,13 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) { PreviousVersion: params.BeaconConfig().GenesisForkVersion, CurrentVersion: params.BeaconConfig().GenesisForkVersion, }, - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Unix(genTime, 0), + ValidatorsRoot: vr, + 
Genesis: gt, FinalizedRoots: map[[32]byte]bool{ finalizedRoot: true, }, }, + clock: startup.NewClock(gt, vr), beaconDB: db, }, rateLimiter: newRateLimiter(p1), @@ -249,6 +260,10 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) { } func TestHandshakeHandlers_Roundtrip(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Scenario is that p1 and p2 connect, exchange handshakes. // p2 disconnects and p1 should forget the handshake status. p1 := p2ptest.NewTestP2P(t) @@ -271,48 +286,56 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) { require.NoError(t, err) blk := util.NewBeaconBlock() blk.Block.Slot = 0 - util.SaveBlock(t, context.Background(), db, blk) + util.SaveBlock(t, ctx, db, blk) finalizedRoot, err := blk.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(ctx, finalizedRoot)) + chain := &mock.ChainService{ + State: st, + FinalizedCheckPoint: ðpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]}, + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + Root: make([]byte, 32), + FinalizedRoots: map[[32]byte]bool{ + finalizedRoot: true, + }, + } + cw := startup.NewClockSynchronizer() r := &Service{ + ctx: ctx, cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: st, - FinalizedCheckPoint: ðpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]}, - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - Root: make([]byte, 32), - FinalizedRoots: map[[32]byte]bool{ - finalizedRoot: true, - }, - }, + p2p: p1, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), 
beaconDB: db, }, - ctx: context.Background(), - rateLimiter: newRateLimiter(p1), + rateLimiter: newRateLimiter(p1), + clockWaiter: cw, + chainStarted: abool.New(), } p1.Digest, err = r.currentForkDigest() require.NoError(t, err) + chain2 := &mock.ChainService{ + FinalizedCheckPoint: ðpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]}, + } r2 := &Service{ + ctx: ctx, cfg: &config{ - chain: &mock.ChainService{ - FinalizedCheckPoint: ðpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]}, - }, - p2p: p2, + chain: chain2, + clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot), + p2p: p2, }, rateLimiter: newRateLimiter(p2), } p2.Digest, err = r.currentForkDigest() require.NoError(t, err) - r.Start() + go r.Start() // Setup streams pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz_snappy") @@ -347,13 +370,14 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) { out := new(primitives.SSZUint64) assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out)) assert.Equal(t, uint64(2), uint64(*out)) - assert.NoError(t, r2.pingHandler(context.Background(), out, stream)) + assert.NoError(t, r2.pingHandler(ctx, out, stream)) assert.NoError(t, stream.Close()) }) numInactive1 := len(p1.Peers().Inactive()) numActive1 := len(p1.Peers().Active()) + require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot))) p1.Connect(p2) p1.Peers().Add(new(enr.Record), p2.BHost.ID(), p2.BHost.Addrs()[0], network.DirUnknown) @@ -412,20 +436,22 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) { Root: finalizedRoot[:], } + chain := &mock.ChainService{ + State: genesisState, + FinalizedCheckPoint: finalizedCheckpt, + Root: headRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } r := &Service{ cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: genesisState, - FinalizedCheckPoint: 
finalizedCheckpt, - Root: headRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - }, + p2p: p1, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, ctx: context.Background(), rateLimiter: newRateLimiter(p1), @@ -494,45 +520,48 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) { } totalSec := int64(params.BeaconConfig().SlotsPerEpoch.Mul(5 * params.BeaconConfig().SecondsPerSlot)) genTime := time.Now().Unix() - totalSec + chain := &mock.ChainService{ + State: genesisState, + FinalizedCheckPoint: finalizedCheckpt, + Root: headRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: time.Unix(genTime, 0), + ValidatorsRoot: [32]byte{'A'}, + FinalizedRoots: map[[32]byte]bool{ + finalizedRoot: true, + }, + } r := &Service{ cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: genesisState, - FinalizedCheckPoint: finalizedCheckpt, - Root: headRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Unix(genTime, 0), - ValidatorsRoot: [32]byte{'A'}, - FinalizedRoots: map[[32]byte]bool{ - finalizedRoot: true, - }, - }, + p2p: p1, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, ctx: context.Background(), rateLimiter: newRateLimiter(p1), } - + chain2 := &mock.ChainService{ + State: genesisState, + FinalizedCheckPoint: finalizedCheckpt, + Root: headRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: time.Unix(genTime, 0), + ValidatorsRoot: [32]byte{'A'}, + FinalizedRoots: map[[32]byte]bool{ + finalizedRoot: 
true, + }, + } r2 := &Service{ cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: genesisState, - FinalizedCheckPoint: finalizedCheckpt, - Root: headRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Unix(genTime, 0), - ValidatorsRoot: [32]byte{'A'}, - FinalizedRoots: map[[32]byte]bool{ - finalizedRoot: true, - }, - }, + p2p: p1, + chain: chain2, + clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot), beaconDB: db, }, ctx: context.Background(), @@ -674,48 +703,52 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { epoch := expectedFinalizedEpoch.Add(2) totalSec := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch) * params.BeaconConfig().SecondsPerSlot)) - genTime := time.Now().Unix() - int64(totalSec) + gt := time.Unix(time.Now().Unix()-int64(totalSec), 0) + vr := [32]byte{'A'} + chain := &mock.ChainService{ + State: nState, + FinalizedCheckPoint: remoteFinalizedChkpt, + Root: rHeadRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: gt, + ValidatorsRoot: vr, + FinalizedRoots: map[[32]byte]bool{ + tt.expectedFinalizedRoot: true, + tt.remoteFinalizedRoot: true, + }, + } r := &Service{ cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: nState, - FinalizedCheckPoint: remoteFinalizedChkpt, - Root: rHeadRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Unix(genTime, 0), - ValidatorsRoot: [32]byte{'A'}, - FinalizedRoots: map[[32]byte]bool{ - tt.expectedFinalizedRoot: true, - tt.remoteFinalizedRoot: true, - }, - }, + p2p: p1, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, ctx: context.Background(), rateLimiter: newRateLimiter(p1), 
} - + chain2 := &mock.ChainService{ + State: nState, + FinalizedCheckPoint: finalizedCheckpt, + Root: headRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: gt, + ValidatorsRoot: vr, + FinalizedRoots: map[[32]byte]bool{ + tt.expectedFinalizedRoot: true, + tt.remoteFinalizedRoot: true, + }, + } r2 := &Service{ cfg: &config{ - p2p: p2, - chain: &mock.ChainService{ - State: nState, - FinalizedCheckPoint: finalizedCheckpt, - Root: headRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Unix(genTime, 0), - ValidatorsRoot: [32]byte{'A'}, - FinalizedRoots: map[[32]byte]bool{ - tt.expectedFinalizedRoot: true, - tt.remoteFinalizedRoot: true, - }, - }, + p2p: p2, + chain: chain2, + clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot), beaconDB: db, }, @@ -750,6 +783,10 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { } func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + p1 := p2ptest.NewTestP2P(t) p2 := p2ptest.NewTestP2P(t) @@ -761,7 +798,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) { finalized := util.NewBeaconBlock() finalizedRoot, err := finalized.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, ðpb.Eth1Data{}) + genesisState, err := transition.GenesisBeaconState(ctx, nil, 0, ðpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot)) @@ -769,28 +806,32 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) { Epoch: 5, Root: finalizedRoot[:], } + chain := 
&mock.ChainService{ + State: genesisState, + FinalizedCheckPoint: finalizedCheckpt, + Root: headRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } + cw := startup.NewClockSynchronizer() r := &Service{ cfg: &config{ - p2p: p1, - chain: &mock.ChainService{ - State: genesisState, - FinalizedCheckPoint: finalizedCheckpt, - Root: headRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - }, + p2p: p1, + chain: chain, }, - ctx: context.Background(), - rateLimiter: newRateLimiter(p1), + ctx: ctx, + rateLimiter: newRateLimiter(p1), + clockWaiter: cw, + chainStarted: abool.New(), } - r.Start() + go r.Start() // Setup streams pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz_snappy") @@ -816,6 +857,8 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) { assert.NoError(t, err) }) + require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot))) + assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad") p1.Connect(p2) @@ -850,19 +893,21 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) { Epoch: 5, Root: finalizedRoot[:], } + chain := &mock.ChainService{ + State: genesisState, + FinalizedCheckPoint: finalizedCheckpt, + Root: headRoot[:], + Fork: ðpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + }, + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } r := &Service{ cfg: &config{ - chain: &mock.ChainService{ - State: genesisState, - FinalizedCheckPoint: finalizedCheckpt, - Root: headRoot[:], - Fork: ðpb.Fork{ - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - CurrentVersion: 
params.BeaconConfig().GenesisForkVersion, - }, - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - }, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, ctx: context.Background(), } @@ -932,12 +977,14 @@ func TestShouldResync(t *testing.T) { headState, err := transition.GenesisBeaconState(context.Background(), nil, 0, ðpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, headState.SetSlot(tt.args.headSlot)) + chain := &mock.ChainService{ + State: headState, + Genesis: tt.args.genesis, + } r := &Service{ cfg: &config{ - chain: &mock.ChainService{ - State: headState, - Genesis: tt.args.genesis, - }, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), initialSync: &mockSync.Sync{IsSyncing: tt.args.syncing}, }, ctx: context.Background(), diff --git a/beacon-chain/sync/service.go b/beacon-chain/sync/service.go index 13eb1e1f38..853651f0af 100644 --- a/beacon-chain/sync/service.go +++ b/beacon-chain/sync/service.go @@ -19,10 +19,8 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/abool" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" @@ -31,6 +29,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" 
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -79,13 +78,13 @@ type config struct { blsToExecPool blstoexec.PoolManager chain blockchainService initialSync Checker - stateNotifier statefeed.Notifier blockNotifier blockfeed.Notifier operationNotifier operation.Notifier executionPayloadReconstructor execution.ExecutionPayloadReconstructor stateGen *stategen.State slasherAttestationsFeed *event.Feed slasherBlockHeadersFeed *event.Feed + clock *startup.Clock } // This defines the interface for interacting with block chain service @@ -139,6 +138,8 @@ type Service struct { syncContributionBitsOverlapLock sync.RWMutex syncContributionBitsOverlapCache *lru.Cache signatureChan chan *signatureVerifier + clockWaiter startup.ClockWaiter + initialSyncComplete chan struct{} } // NewService initializes new regular sync service. @@ -164,14 +165,14 @@ func NewService(ctx context.Context, opts ...Option) *Service { r.rateLimiter = newRateLimiter(r.cfg.p2p) r.initCaches() - go r.registerHandlers() - go r.verifierRoutine() - return r } // Start the regular sync service. func (s *Service) Start() { + go s.verifierRoutine() + go s.registerHandlers() + s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye) s.cfg.p2p.AddDisconnectionHandler(func(_ context.Context, _ peer.ID) error { // no-op @@ -210,7 +211,7 @@ func (s *Service) Stop() error { func (s *Service) Status() error { // If our head slot is on a previous epoch and our peers are reporting their head block are // in the most recent epoch, then we might be out of sync. 
- if headEpoch := slots.ToEpoch(s.cfg.chain.HeadSlot()); headEpoch+1 < slots.ToEpoch(s.cfg.chain.CurrentSlot()) && + if headEpoch := slots.ToEpoch(s.cfg.chain.HeadSlot()); headEpoch+1 < slots.ToEpoch(s.cfg.clock.CurrentSlot()) && headEpoch+1 < s.cfg.p2p.Peers().HighestEpoch() { return errors.New("out of sync") } @@ -232,58 +233,42 @@ func (s *Service) initCaches() { s.badBlockCache = lruwrpr.New(badBlockSize) } -func (s *Service) registerHandlers() { - // Wait until chain start. - stateChannel := make(chan *feed.Event, 1) - stateSub := s.cfg.stateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - for { - select { - case e := <-stateChannel: - switch e.Type { - case statefeed.Initialized: - data, ok := e.Data.(*statefeed.InitializedData) - if !ok { - log.Error("Event feed data is not type *statefeed.InitializedData") - return - } - startTime := data.StartTime - log.WithField("starttime", startTime).Debug("Received state initialized event") +func (s *Service) waitForChainStart() { + clock, err := s.clockWaiter.WaitForClock(s.ctx) + if err != nil { + log.WithError(err).Error("sync service failed to receive genesis data") + return + } + s.cfg.clock = clock + startTime := clock.GenesisTime() + log.WithField("starttime", startTime).Debug("Received state initialized event") + // Register respective rpc handlers at state initialized event. + s.registerRPCHandlers() + // Wait for chainstart in separate routine. + if startTime.After(prysmTime.Now()) { + time.Sleep(prysmTime.Until(startTime)) + } + log.WithField("starttime", startTime).Debug("Chain started in sync service") + s.markForChainStart() +} - // Register respective rpc handlers at state initialized event. - s.registerRPCHandlers() - // Wait for chainstart in separate routine. 
- go func() { - if startTime.After(prysmTime.Now()) { - time.Sleep(prysmTime.Until(startTime)) - } - log.WithField("starttime", startTime).Debug("Chain started in sync service") - s.markForChainStart() - }() - case statefeed.Synced: - _, ok := e.Data.(*statefeed.SyncedData) - if !ok { - log.Error("Event feed data is not type *statefeed.SyncedData") - return - } - // Register respective pubsub handlers at state synced event. - digest, err := s.currentForkDigest() - if err != nil { - log.WithError(err).Error("Could not retrieve current fork digest") - return - } - currentEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.cfg.chain.GenesisTime().Unix()))) - s.registerSubscribers(currentEpoch, digest) - go s.forkWatcher() - return - } - case <-s.ctx.Done(): - log.Debug("Context closed, exiting goroutine") - return - case err := <-stateSub.Err(): - log.WithError(err).Error("Could not subscribe to state notifier") +func (s *Service) registerHandlers() { + s.waitForChainStart() + select { + case <-s.initialSyncComplete: + // Register respective pubsub handlers at state synced event. + digest, err := s.currentForkDigest() + if err != nil { + log.WithError(err).Error("Could not retrieve current fork digest") return } + currentEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.cfg.clock.GenesisTime().Unix()))) + s.registerSubscribers(currentEpoch, digest) + go s.forkWatcher() + return + case <-s.ctx.Done(): + log.Debug("Context closed, exiting goroutine") + return } } @@ -292,6 +277,10 @@ func (s *Service) markForChainStart() { s.chainStarted.Set() } +func (s *Service) chainIsStarted() bool { + return s.chainStarted.IsSet() +} + // Checker defines a struct which can verify whether a node is currently // synchronizing a chain with the rest of peers in the network. 
type Checker interface { diff --git a/beacon-chain/sync/service_test.go b/beacon-chain/sync/service_test.go index 009ea870f4..a7b199e8ed 100644 --- a/beacon-chain/sync/service_test.go +++ b/beacon-chain/sync/service_test.go @@ -9,9 +9,9 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/abool" mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" - statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/crypto/bls" @@ -25,14 +25,16 @@ import ( func TestService_StatusZeroEpoch(t *testing.T) { bState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 0}) require.NoError(t, err) + chain := &mockChain.ChainService{ + Genesis: time.Now(), + State: bState, + } r := &Service{ cfg: &config{ p2p: p2ptest.NewTestP2P(t), initialSync: new(mockSync.Sync), - chain: &mockChain.ChainService{ - Genesis: time.Now(), - State: bState, - }, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, chainStarted: abool.New(), } @@ -47,29 +49,25 @@ func TestSyncHandlers_WaitToSync(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } + gs := startup.NewClockSynchronizer() r := Service{ ctx: context.Background(), cfg: &config{ - p2p: p2p, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p2p, + chain: chainService, + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), + clockWaiter: gs, } topic := "/eth2/%x/beacon_block" go 
r.registerHandlers() + go r.waitForChainStart() time.Sleep(100 * time.Millisecond) - i := r.cfg.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - }, - }) - if i == 0 { - t.Fatal("didn't send genesis time to subscribers") - } + + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr))) b := []byte("sk") b32 := bytesutil.ToBytes32(b) sk, err := bls.SecretKeyFromBytes(b32[:]) @@ -90,33 +88,24 @@ func TestSyncHandlers_WaitForChainStart(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } + gs := startup.NewClockSynchronizer() r := Service{ ctx: context.Background(), cfg: &config{ - p2p: p2p, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p2p, + chain: chainService, + initialSync: &mockSync.Sync{IsSyncing: false}, }, chainStarted: abool.New(), slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), + clockWaiter: gs, } go r.registerHandlers() - time.Sleep(100 * time.Millisecond) - i := r.cfg.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now().Add(2 * time.Second), - }, - }) - if i == 0 { - t.Fatal("didn't send genesis time to subscribers") - } - require.Equal(t, false, r.chainStarted.IsSet(), "Chainstart was marked prematurely") + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr))) + r.waitForChainStart() - // wait for chainstart to be sent - time.Sleep(3 * time.Second) require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.") } @@ -128,18 +117,20 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + gs := startup.NewClockSynchronizer() r := Service{ ctx: ctx, cfg: &config{ p2p: p2p, beaconDB: dbTest.SetupDB(t), chain: 
chainService, - stateNotifier: chainService.StateNotifier(), blockNotifier: chainService.BlockNotifier(), initialSync: &mockSync.Sync{IsSyncing: false}, }, - chainStarted: abool.New(), - subHandler: newSubTopicHandler(), + chainStarted: abool.New(), + subHandler: newSubTopicHandler(), + clockWaiter: gs, + initialSyncComplete: make(chan struct{}), } r.initCaches() @@ -148,19 +139,9 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) { r.registerHandlers() syncCompleteCh <- true }() - for i := 0; i == 0; { - assert.NoError(t, ctx.Err()) - i = r.cfg.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - }, - }) - } - for !r.chainStarted.IsSet() { - assert.NoError(t, ctx.Err()) - time.Sleep(time.Millisecond) - } + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr))) + r.waitForChainStart() require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.") blockChan := make(chan *feed.Event, 1) @@ -184,15 +165,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) { p2p.ReceivePubSub(topic, msg) assert.Equal(t, 0, len(blockChan), "block was received by sync service despite not being fully synced") - for i := 0; i == 0; { - assert.NoError(t, ctx.Err()) - i = r.cfg.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Synced, - Data: &statefeed.SyncedData{ - StartTime: time.Now(), - }, - }) - } + close(r.initialSyncComplete) <-syncCompleteCh p2p.ReceivePubSub(topic, msg) @@ -211,30 +184,25 @@ func TestSyncService_StopCleanly(t *testing.T) { ValidatorsRoot: [32]byte{'A'}, } ctx, cancel := context.WithCancel(context.Background()) + gs := startup.NewClockSynchronizer() r := Service{ ctx: ctx, cancel: cancel, cfg: &config{ - p2p: p2p, - chain: chainService, - stateNotifier: chainService.StateNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p2p, + chain: chainService, + initialSync: &mockSync.Sync{IsSyncing: false}, 
}, - chainStarted: abool.New(), - subHandler: newSubTopicHandler(), + chainStarted: abool.New(), + subHandler: newSubTopicHandler(), + clockWaiter: gs, + initialSyncComplete: make(chan struct{}), } go r.registerHandlers() - time.Sleep(100 * time.Millisecond) - i := r.cfg.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{ - StartTime: time.Now(), - }, - }) - if i == 0 { - t.Fatal("didn't send genesis time to subscribers") - } + var vr [32]byte + require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr))) + r.waitForChainStart() var err error p2p.Digest, err = r.currentForkDigest() @@ -244,16 +212,7 @@ func TestSyncService_StopCleanly(t *testing.T) { time.Sleep(2 * time.Second) require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.") - i = r.cfg.stateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Synced, - Data: &statefeed.SyncedData{ - StartTime: time.Now(), - }, - }) - if i == 0 { - t.Fatal("didn't send genesis time to sync event subscribers") - } - + close(r.initialSyncComplete) time.Sleep(1 * time.Second) require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics())) diff --git a/beacon-chain/sync/subscriber.go b/beacon-chain/sync/subscriber.go index 3325525638..3e189f7fd2 100644 --- a/beacon-chain/sync/subscriber.go +++ b/beacon-chain/sync/subscriber.go @@ -135,7 +135,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) { // subscribe to a given topic with a given validator and subscription handler. // The base protobuf message is used to initialize new messages for decoding. 
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) *pubsub.Subscription { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() _, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:]) if err != nil { // Impossible condition as it would mean digest does not exist. @@ -301,7 +301,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p // subscribe to a static subnet with the given topic and index.A given validator and subscription handler is // used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding. func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() _, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:]) if err != nil { // Impossible condition as it would mean digest does not exist. @@ -315,7 +315,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ { s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle) } - genesis := s.cfg.chain.GenesisTime() + genesis := s.cfg.clock.GenesisTime() ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot) go func() { @@ -374,7 +374,7 @@ func (s *Service) subscribeDynamicWithSubnets( handle subHandler, digest [4]byte, ) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() _, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:]) if err != nil { // Impossible condition as it would mean digest does not exist. 
@@ -385,7 +385,7 @@ func (s *Service) subscribeDynamicWithSubnets( panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat)) } subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot) - genesis := s.cfg.chain.GenesisTime() + genesis := s.cfg.clock.GenesisTime() ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot) go func() { @@ -503,7 +503,7 @@ func (s *Service) subscribeSyncSubnet( // subscribe to a static subnet with the given topic and index. A given validator and subscription handler is // used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding. func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() _, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:]) if err != nil { panic(err) @@ -515,7 +515,7 @@ func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrapped for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ { s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle) } - genesis := s.cfg.chain.GenesisTime() + genesis := s.cfg.clock.GenesisTime() ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot) go func() { @@ -574,7 +574,7 @@ func (s *Service) subscribeDynamicWithSyncSubnets( handle subHandler, digest [4]byte, ) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() + genRoot := s.cfg.clock.GenesisValidatorsRoot() _, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:]) if err != nil { panic(err) @@ -584,7 +584,7 @@ func (s *Service) subscribeDynamicWithSyncSubnets( panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat)) } subscriptions := make(map[uint64]*pubsub.Subscription, 
params.BeaconConfig().SyncCommitteeSubnetCount) - genesis := s.cfg.chain.GenesisTime() + genesis := s.cfg.clock.GenesisTime() ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot) go func() { @@ -686,7 +686,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID { log.WithError(err).Error("Could not compute fork digest") return pids } - currSlot := s.cfg.chain.CurrentSlot() + currSlot := s.cfg.clock.CurrentSlot() wantedSubs := s.retrievePersistentSubs(currSlot) wantedSubs = slice.SetUint64(append(wantedSubs, s.attesterSubnetIndices(currSlot)...)) topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.Attestation{})] @@ -740,8 +740,8 @@ func (_ *Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uin } func (s *Service) currentForkDigest() ([4]byte, error) { - genRoot := s.cfg.chain.GenesisValidatorsRoot() - return forks.CreateForkDigest(s.cfg.chain.GenesisTime(), genRoot[:]) + genRoot := s.cfg.clock.GenesisValidatorsRoot() + return forks.CreateForkDigest(s.cfg.clock.GenesisTime(), genRoot[:]) } // Checks if the provided digest matches up with the current supposed digest. 
diff --git a/beacon-chain/sync/subscriber_test.go b/beacon-chain/sync/subscriber_test.go index 9ab36bb5d4..878e69a748 100644 --- a/beacon-chain/sync/subscriber_test.go +++ b/beacon-chain/sync/subscriber_test.go @@ -20,6 +20,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" @@ -39,15 +40,18 @@ import ( func TestSubscribe_ReceivesValidMessage(t *testing.T) { p2pService := p2ptest.NewTestP2P(t) + gt := time.Now() + vr := [32]byte{'A'} r := Service{ ctx: context.Background(), cfg: &config{ p2p: p2pService, initialSync: &mockSync.Sync{IsSyncing: false}, chain: &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now(), + ValidatorsRoot: vr, + Genesis: gt, }, + clock: startup.NewClock(gt, vr), }, subHandler: newSubTopicHandler(), chainStarted: abool.New(), @@ -79,15 +83,18 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) { func TestSubscribe_UnsubscribeTopic(t *testing.T) { p2pService := p2ptest.NewTestP2P(t) + gt := time.Now() + vr := [32]byte{'A'} r := Service{ ctx: context.Background(), cfg: &config{ p2p: p2pService, initialSync: &mockSync.Sync{IsSyncing: false}, chain: &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now(), + ValidatorsRoot: vr, + Genesis: gt, }, + clock: startup.NewClock(gt, vr), }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -123,9 +130,11 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) { p2pService := p2ptest.NewTestP2P(t) ctx := context.Background() d := db.SetupDB(t) + gt := time.Now() + vr := [32]byte{'A'} chainService := &mockChain.ChainService{ - Genesis: 
time.Now(), - ValidatorsRoot: [32]byte{'A'}, + Genesis: gt, + ValidatorsRoot: vr, } r := Service{ ctx: ctx, @@ -134,6 +143,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) { initialSync: &mockSync.Sync{IsSyncing: false}, slashingPool: slashings.NewPool(), chain: chainService, + clock: startup.NewClock(gt, vr), beaconDB: d, }, seenAttesterSlashingCache: make(map[uint64]bool), @@ -187,6 +197,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) { slashingPool: slashings.NewPool(), chain: chainService, beaconDB: d, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), }, seenProposerSlashingCache: lruwrpr.New(10), chainStarted: abool.New(), @@ -226,14 +237,16 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) { func TestSubscribe_HandlesPanic(t *testing.T) { p := p2ptest.NewTestP2P(t) + chain := &mockChain.ChainService{ + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } r := Service{ ctx: context.Background(), cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - }, - p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + p2p: p, }, subHandler: newSubTopicHandler(), chainStarted: abool.New(), @@ -261,14 +274,16 @@ func TestSubscribe_HandlesPanic(t *testing.T) { func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) { p := p2ptest.NewTestP2P(t) hook := logTest.NewGlobal() + chain := &mockChain.ChainService{ + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } r := Service{ ctx: context.Background(), cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - }, - p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + p2p: p, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -300,14 +315,16 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) { func TestStaticSubnets(t *testing.T) { p 
:= p2ptest.NewTestP2P(t) ctx, cancel := context.WithCancel(context.Background()) + chain := &mockChain.ChainService{ + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } r := Service{ ctx: ctx, cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - }, - p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + p2p: p, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -427,6 +444,7 @@ func Test_wrapAndReportValidation(t *testing.T) { chainStarted: chainStarted, cfg: &config{ chain: mChain, + clock: startup.NewClock(mChain.Genesis, mChain.ValidatorsRoot), }, subHandler: newSubTopicHandler(), } @@ -452,19 +470,28 @@ func TestFilterSubnetPeers(t *testing.T) { defer flags.Init(new(flags.GlobalFlags)) p := p2ptest.NewTestP2P(t) ctx, cancel := context.WithCancel(context.Background()) + defer cancel() currSlot := primitives.Slot(100) + + gt := time.Now() + genPlus100 := func() time.Time { + return gt.Add(time.Second * time.Duration(uint64(currSlot)*params.BeaconConfig().SecondsPerSlot)) + } + chain := &mockChain.ChainService{ + Genesis: gt, + ValidatorsRoot: [32]byte{'A'}, + FinalizedRoots: map[[32]byte]bool{ + {}: true, + }, + } + clock := startup.NewClock(chain.Genesis, chain.ValidatorsRoot, startup.WithNower(genPlus100)) + require.Equal(t, currSlot, clock.CurrentSlot()) r := Service{ ctx: ctx, cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - Slot: &currSlot, - FinalizedRoots: map[[32]byte]bool{ - {}: true, - }, - }, - p2p: p, + chain: chain, + clock: clock, + p2p: p, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -509,9 +536,7 @@ func TestFilterSubnetPeers(t *testing.T) { } recPeers = r.filterNeededPeers(wantedPeers) - assert.DeepEqual(t, 1, len(recPeers), "expected at least 1 suitable peer to prune") - - cancel() + assert.Equal(t, 1, len(recPeers), "expected at least 1 suitable peer to 
prune") } func TestSubscribeWithSyncSubnets_StaticOK(t *testing.T) { @@ -522,16 +547,16 @@ func TestSubscribeWithSyncSubnets_StaticOK(t *testing.T) { p := p2ptest.NewTestP2P(t) ctx, cancel := context.WithCancel(context.Background()) - currSlot := primitives.Slot(100) + chain := &mockChain.ChainService{ + Genesis: time.Now(), + ValidatorsRoot: [32]byte{'A'}, + } r := Service{ ctx: ctx, cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - Slot: &currSlot, - }, - p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + p2p: p, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -553,23 +578,24 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) { p := p2ptest.NewTestP2P(t) ctx, cancel := context.WithCancel(context.Background()) - currSlot := primitives.Slot(100) + gt := time.Now() + vr := [32]byte{'A'} r := Service{ ctx: ctx, cfg: &config{ chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - Slot: &currSlot, + Genesis: gt, + ValidatorsRoot: vr, }, - p2p: p, + p2p: p, + clock: startup.NewClock(gt, vr), }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), } // Empty cache at the end of the test. 
defer cache.SyncSubnetIDs.EmptyAllCaches() - slot := r.cfg.chain.CurrentSlot() + slot := r.cfg.clock.CurrentSlot() currEpoch := slots.ToEpoch(slot) cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second) digest, err := r.currentForkDigest() @@ -599,22 +625,24 @@ func TestSubscribeWithSyncSubnets_StaticSwitchFork(t *testing.T) { params.BeaconConfig().InitializeForkSchedule() ctx, cancel := context.WithCancel(context.Background()) currSlot := primitives.Slot(100) + chain := &mockChain.ChainService{ + Genesis: time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second), + ValidatorsRoot: [32]byte{'A'}, + Slot: &currSlot, + } r := Service{ ctx: ctx, cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second), - ValidatorsRoot: [32]byte{'A'}, - Slot: &currSlot, - }, - p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + p2p: p, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), } // Empty cache at the end of the test. 
defer cache.SyncSubnetIDs.EmptyAllCaches() - genRoot := r.cfg.chain.GenesisValidatorsRoot() + genRoot := r.cfg.clock.GenesisValidatorsRoot() digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:]) assert.NoError(t, err) r.subscribeStaticWithSyncSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest) @@ -638,15 +666,18 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) { params.BeaconConfig().InitializeForkSchedule() ctx, cancel := context.WithCancel(context.Background()) currSlot := primitives.Slot(100) + gt := time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + vr := [32]byte{'A'} r := Service{ ctx: ctx, cfg: &config{ chain: &mockChain.ChainService{ - Genesis: time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), - ValidatorsRoot: [32]byte{'A'}, + Genesis: gt, + ValidatorsRoot: vr, Slot: &currSlot, }, - p2p: p, + clock: startup.NewClock(gt, vr), + p2p: p, }, chainStarted: abool.New(), subHandler: newSubTopicHandler(), @@ -654,7 +685,7 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) { // Empty cache at the end of the test. 
defer cache.SyncSubnetIDs.EmptyAllCaches() cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), 0, []uint64{0, 1}, 10*time.Second) - genRoot := r.cfg.chain.GenesisValidatorsRoot() + genRoot := r.cfg.clock.GenesisValidatorsRoot() digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:]) assert.NoError(t, err) diff --git a/beacon-chain/sync/sync_fuzz_test.go b/beacon-chain/sync/sync_fuzz_test.go index dcb06308b2..5bd404c6e1 100644 --- a/beacon-chain/sync/sync_fuzz_test.go +++ b/beacon-chain/sync/sync_fuzz_test.go @@ -20,6 +20,7 @@ import ( doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" @@ -67,6 +68,7 @@ func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -150,6 +152,7 @@ func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) { chain: chainService, blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), }, seenBlockCache: lruwrpr.New(10), badBlockCache: lruwrpr.New(10), @@ -229,6 +232,7 @@ func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, diff --git 
a/beacon-chain/sync/validate_aggregate_proof.go b/beacon-chain/sync/validate_aggregate_proof.go index cdcedf5c00..0abdf8b09e 100644 --- a/beacon-chain/sync/validate_aggregate_proof.go +++ b/beacon-chain/sync/validate_aggregate_proof.go @@ -80,7 +80,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms // processing tolerance. if err := helpers.ValidateAttestationTime( m.Message.Aggregate.Data.Slot, - s.cfg.chain.GenesisTime(), + s.cfg.clock.GenesisTime(), earlyAttestationProcessingTolerance, ); err != nil { tracing.AnnotateError(span, err) diff --git a/beacon-chain/sync/validate_aggregate_proof_test.go b/beacon-chain/sync/validate_aggregate_proof_test.go index 1c425729ad..0a32327d81 100644 --- a/beacon-chain/sync/validate_aggregate_proof_test.go +++ b/beacon-chain/sync/validate_aggregate_proof_test.go @@ -17,6 +17,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" @@ -355,21 +356,23 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix()))) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + chain := &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()), + Optimistic: true, + DB: db, + State: beaconState, + ValidAttestation: true, + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 0, + Root: att.Data.BeaconBlockRoot, + }} r := &Service{ ctx: ctx, cfg: &config{ - p2p: p, - beaconDB: db, - initialSync: &mockSync.Sync{IsSyncing: false}, - chain: &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()), - Optimistic: true, - 
DB: db, - State: beaconState, - ValidAttestation: true, - FinalizedCheckPoint: ðpb.Checkpoint{ - Epoch: 0, - Root: att.Data.BeaconBlockRoot, - }}, + p2p: p, + beaconDB: db, + initialSync: &mockSync.Sync{IsSyncing: false}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), attPool: attestations.NewPool(), attestationNotifier: (&mock.ChainService{}).OperationNotifier(), }, @@ -456,22 +459,23 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + chain := &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()), + DB: db, + ValidatorsRoot: [32]byte{'A'}, + State: beaconState, + ValidAttestation: true, + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 0, + Root: signedAggregateAndProof.Message.Aggregate.Data.BeaconBlockRoot, + }} r := &Service{ ctx: ctx, cfg: &config{ - p2p: p, - beaconDB: db, - initialSync: &mockSync.Sync{IsSyncing: false}, - chain: &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()), - DB: db, - ValidatorsRoot: [32]byte{'A'}, - State: beaconState, - ValidAttestation: true, - FinalizedCheckPoint: ðpb.Checkpoint{ - Epoch: 0, - Root: signedAggregateAndProof.Message.Aggregate.Data.BeaconBlockRoot, - }}, - + p2p: p, + beaconDB: db, + initialSync: &mockSync.Sync{IsSyncing: false}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), attPool: attestations.NewPool(), attestationNotifier: (&mock.ChainService{}).OperationNotifier(), }, diff --git a/beacon-chain/sync/validate_attester_slashing_test.go b/beacon-chain/sync/validate_attester_slashing_test.go index 7a9534f3d9..3358c865a1 100644 --- a/beacon-chain/sync/validate_attester_slashing_test.go +++ b/beacon-chain/sync/validate_attester_slashing_test.go @@ -14,6 +14,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -80,10 +81,12 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) { slashing, s := setupValidAttesterSlashing(t) + chain := &mock.ChainService{State: s, Genesis: time.Now()} r := &Service{ cfg: &config{ p2p: p, - chain: &mock.ChainService{State: s, Genesis: time.Now()}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), initialSync: &mockSync.Sync{IsSyncing: false}, }, seenAttesterSlashingCache: make(map[uint64]bool), @@ -123,10 +126,12 @@ func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T require.NoError(t, s.SetValidators(vals)) + chain := &mock.ChainService{State: s, Genesis: time.Now()} r := &Service{ cfg: &config{ p2p: p, - chain: &mock.ChainService{State: s, Genesis: time.Now()}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), initialSync: &mockSync.Sync{IsSyncing: false}, }, seenAttesterSlashingCache: make(map[uint64]bool), @@ -171,11 +176,13 @@ func TestValidateAttesterSlashing_CanFilter(t *testing.T) { p := p2ptest.NewTestP2P(t) ctx := context.Background() + chain := &mock.ChainService{Genesis: time.Now()} r := &Service{ cfg: &config{ p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, - chain: &mock.ChainService{Genesis: time.Now()}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, seenAttesterSlashingCache: make(map[uint64]bool), subHandler: newSubTopicHandler(), @@ -240,10 +247,12 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() + chain := &mock.ChainService{State: s} r := &Service{ cfg: &config{ p2p: p, - chain: &mock.ChainService{State: s}, + chain: chain, + 
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), initialSync: &mockSync.Sync{IsSyncing: false}, }, seenAttesterSlashingCache: make(map[uint64]bool), diff --git a/beacon-chain/sync/validate_beacon_attestation.go b/beacon-chain/sync/validate_beacon_attestation.go index c8849424b8..27f044b478 100644 --- a/beacon-chain/sync/validate_beacon_attestation.go +++ b/beacon-chain/sync/validate_beacon_attestation.go @@ -78,7 +78,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p // Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation // processing tolerance. - if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.chain.GenesisTime(), + if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.clock.GenesisTime(), earlyAttestationProcessingTolerance); err != nil { tracing.AnnotateError(span, err) return pubsub.ValidationIgnore, err diff --git a/beacon-chain/sync/validate_beacon_attestation_test.go b/beacon-chain/sync/validate_beacon_attestation_test.go index 57760e4ef9..baf60993db 100644 --- a/beacon-chain/sync/validate_beacon_attestation_test.go +++ b/beacon-chain/sync/validate_beacon_attestation_test.go @@ -15,6 +15,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" @@ -46,6 +47,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { p2p: p, beaconDB: db, chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(), }, blkRootToPendingAtts: 
make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), @@ -305,11 +307,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { } func TestService_setSeenCommitteeIndicesSlot(t *testing.T) { - chainService := &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - } - s := NewService(context.Background(), WithP2P(p2ptest.NewTestP2P(t)), WithStateNotifier(chainService.StateNotifier())) + s := NewService(context.Background(), WithP2P(p2ptest.NewTestP2P(t))) s.initCaches() // Empty cache diff --git a/beacon-chain/sync/validate_beacon_blocks.go b/beacon-chain/sync/validate_beacon_blocks.go index 0c73606bd9..fa5f8d9280 100644 --- a/beacon-chain/sync/validate_beacon_blocks.go +++ b/beacon-chain/sync/validate_beacon_blocks.go @@ -122,7 +122,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms // Be lenient in handling early blocks. Instead of discarding blocks arriving later than // MAXIMUM_GOSSIP_CLOCK_DISPARITY in future, we tolerate blocks arriving at max two slots // earlier (SECONDS_PER_SLOT * 2 seconds). Queue such blocks and process them at the right slot. 
- genesisTime := uint64(s.cfg.chain.GenesisTime().Unix()) + genesisTime := uint64(s.cfg.clock.GenesisTime().Unix()) if err := slots.VerifyTime(genesisTime, blk.Block().Slot(), earlyBlockProcessingTolerance); err != nil { log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block: could not verify slot time") return pubsub.ValidationIgnore, nil @@ -156,7 +156,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms return pubsub.ValidationIgnore, err } s.pendingQueueLock.Unlock() - err := fmt.Errorf("early block, with current slot %d < block slot %d", s.cfg.chain.CurrentSlot(), blk.Block().Slot()) + err := fmt.Errorf("early block, with current slot %d < block slot %d", s.cfg.clock.CurrentSlot(), blk.Block().Slot()) log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not process early block") return pubsub.ValidationIgnore, err } diff --git a/beacon-chain/sync/validate_beacon_blocks_test.go b/beacon-chain/sync/validate_beacon_blocks_test.go index 34192ac37b..49ad0ecb03 100644 --- a/beacon-chain/sync/validate_beacon_blocks_test.go +++ b/beacon-chain/sync/validate_beacon_blocks_test.go @@ -23,6 +23,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru" @@ -79,6 +80,7 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -122,6 +124,7 @@ func 
TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), }, seenBlockCache: lruwrpr.New(10), @@ -183,6 +186,7 @@ func TestValidateBeaconBlockPubSub_CanRecoverStateSummary(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -248,6 +252,7 @@ func TestValidateBeaconBlockPubSub_IsInCache(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -313,6 +318,7 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -380,6 +386,7 @@ func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -448,6 +455,7 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -553,6 +561,7 @@ func TestValidateBeaconBlockPubSub_IgnoreAndQueueBlocksFromNearFuture(t *testing beaconDB: db, initialSync: 
&mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -604,6 +613,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) { beaconDB: db, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), }, chainStarted: abool.New(), @@ -657,6 +667,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), }, seenBlockCache: lruwrpr.New(10), @@ -714,6 +725,7 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), }, seenBlockCache: lruwrpr.New(10), @@ -763,6 +775,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) { beaconDB: db, p2p: p, chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), blockNotifier: chain.BlockNotifier(), attPool: attestations.NewPool(), initialSync: &mockSync.Sync{IsSyncing: false}, @@ -848,6 +861,7 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -913,6 +927,7 @@ func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: 
startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -962,6 +977,7 @@ func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) { Epoch: 0, }} r.cfg.chain = chainService + r.cfg.clock = startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot) res, err = r.validateBeaconBlockPubSub(ctx, "", m) require.ErrorContains(t, "unknown parent for block", err) @@ -1022,6 +1038,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromBadParent(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -1106,8 +1123,7 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) { msg.Block.Body.ExecutionPayload.GasLimit = 11 msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32) msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1")) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2")) + msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"), []byte("transaction 2")) msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx]) require.NoError(t, err) @@ -1126,6 +1142,7 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) { chain: chainService, blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), }, seenBlockCache: lruwrpr.New(10), badBlockCache: lruwrpr.New(10), @@ 
-1135,7 +1152,7 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) { _, err = p.Encoding().EncodeGossip(buf, msg) require.NoError(t, err) topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)] - genesisValidatorsRoot := r.cfg.chain.GenesisValidatorsRoot() + genesisValidatorsRoot := r.cfg.clock.GenesisValidatorsRoot() BellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot[:]) require.NoError(t, err) topic = r.addDigestToTopic(topic, BellatrixDigest) @@ -1178,8 +1195,7 @@ func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) { msg.Block.Body.ExecutionPayload.GasLimit = 11 msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32) msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1")) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2")) + msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"), []byte("transaction 2")) msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx]) require.NoError(t, err) @@ -1196,6 +1212,7 @@ func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) { p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, chain: chainService, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, }, @@ -1207,7 +1224,7 @@ func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) { _, err = p.Encoding().EncodeGossip(buf, msg) require.NoError(t, err) topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)] - genesisValidatorsRoot := 
r.cfg.chain.GenesisValidatorsRoot() + genesisValidatorsRoot := r.cfg.clock.GenesisValidatorsRoot() BellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot[:]) assert.NoError(t, err) topic = r.addDigestToTopic(topic, BellatrixDigest) @@ -1281,8 +1298,7 @@ func Test_validateBellatrixBeaconBlockParentValidation(t *testing.T) { msg.Block.Body.ExecutionPayload.GasLimit = 11 msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32) msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1")) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2")) + msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"), []byte("transaction 2")) msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx]) require.NoError(t, err) @@ -1339,8 +1355,7 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) { msg.Block.Body.ExecutionPayload.GasLimit = 11 msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32) msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1")) - msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2")) + msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"), []byte("transaction 2")) msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, 
params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx]) require.NoError(t, err) @@ -1359,6 +1374,7 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) { chain: chainService, blockNotifier: chainService.BlockNotifier(), stateGen: stateGen, + clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot), }, seenBlockCache: lruwrpr.New(10), badBlockCache: lruwrpr.New(10), @@ -1368,7 +1384,7 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) { _, err = p.Encoding().EncodeGossip(buf, msg) require.NoError(t, err) topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)] - genesisValidatorsRoot := r.cfg.chain.GenesisValidatorsRoot() + genesisValidatorsRoot := r.cfg.clock.GenesisValidatorsRoot() BellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot[:]) require.NoError(t, err) topic = r.addDigestToTopic(topic, BellatrixDigest) diff --git a/beacon-chain/sync/validate_bls_to_execution_change_test.go b/beacon-chain/sync/validate_bls_to_execution_change_test.go index b844477c94..0ca8375748 100644 --- a/beacon-chain/sync/validate_bls_to_execution_change_test.go +++ b/beacon-chain/sync/validate_bls_to_execution_change_test.go @@ -18,48 +18,48 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/config/params" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/testing/assert" + "github.com/prysmaticlabs/prysm/v4/testing/require" "github.com/prysmaticlabs/prysm/v4/testing/util" "github.com/prysmaticlabs/prysm/v4/time/slots" ) 
func TestService_ValidateBlsToExecutionChange(t *testing.T) { beaconDB := testingdb.SetupDB(t) - defaultTopic := p2p.BlsToExecutionChangeSubnetTopicFormat + defaultTopic := p2p.BlsToExecutionChangeSubnetTopicFormat + "/" + encoder.ProtocolSuffixSSZSnappy fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E} wantedExecAddress := []byte{0xd8, 0xdA, 0x6B, 0xF2, 0x69, 0x64, 0xaF, 0x9D, 0x7e, 0xEd, 0x9e, 0x03, 0xE5, 0x34, 0x15, 0xD3, 0x7a, 0xA9, 0x60, 0x45} - defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy chainService := &mockChain.ChainService{ Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } var emptySig [96]byte type args struct { - ctx context.Context pid peer.ID msg *ethpb.SignedBLSToExecutionChange topic string } tests := []struct { name string - svc *Service + svcopts []Option setupSvc func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) + clock *startup.Clock args args want pubsub.ValidationResult }{ { name: "Is syncing", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: true}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), + }, setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB @@ -67,7 +67,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: "junk", msg: ðpb.SignedBLSToExecutionChange{ @@ -82,13 +81,12 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Bad Topic", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - 
WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), + }, setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB @@ -96,7 +94,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: "junk", msg: ðpb.SignedBLSToExecutionChange{ @@ -111,14 +108,13 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Already Seen Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB @@ -134,7 +130,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -147,25 +142,24 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }}, want: pubsub.ValidationIgnore, }, + { name: "Non-Capella HeadState Valid Execution Change Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, + clock: 
startup.NewClock(time.Now().Add(-time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot*10)), [32]byte{'A'}), setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() st, keys := util.DeterministicGenesisStateBellatrix(t, 128) s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - State: st, + State: st, } msg.Message.ValidatorIndex = 50 @@ -181,7 +175,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -196,30 +189,27 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Non-existent Validator Index", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, + clock: startup.NewClock(time.Now().Add(-time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Duration(10)), [32]byte{'A'}), setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() st, _ := util.DeterministicGenesisStateCapella(t, 128) s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - State: st, + State: st, } msg.Message.ValidatorIndex = 130 return s, topic 
}, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -234,23 +224,21 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Invalid Withdrawal Pubkey", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, + clock: startup.NewClock(time.Now().Add(-time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Duration(10)), [32]byte{'A'}), setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() st, keys := util.DeterministicGenesisStateCapella(t, 128) s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - State: st, + State: st, } msg.Message.ValidatorIndex = 50 @@ -260,7 +248,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -275,14 +262,14 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Invalid Credentials in State", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, + clock: 
startup.NewClock(time.Now().Add(-time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Duration(10)), [32]byte{'A'}), setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB @@ -296,9 +283,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return true, val, nil })) s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - State: st, + State: st, } msg.Message.ValidatorIndex = 50 @@ -308,7 +293,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -323,23 +307,21 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Invalid Execution Change Signature", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, + clock: startup.NewClock(time.Now().Add(-time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Duration(10)), [32]byte{'A'}), setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() st, keys := util.DeterministicGenesisStateCapella(t, 128) s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - State: st, + 
State: st, } msg.Message.ValidatorIndex = 50 @@ -352,7 +334,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -367,23 +348,21 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { }, { name: "Valid Execution Change Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), WithBlsToExecPool(blstoexec.NewPool()), - ), + }, + clock: startup.NewClock(time.Now().Add(-time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Duration(10)), [32]byte{'A'}), setupSvc: func(s *Service, msg *ethpb.SignedBLSToExecutionChange, topic string) (*Service, string) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() st, keys := util.DeterministicGenesisStateCapella(t, 128) s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - State: st, + State: st, } msg.Message.ValidatorIndex = 50 @@ -399,7 +378,6 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { return s, topic }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest), msg: ðpb.SignedBLSToExecutionChange{ @@ -415,7 +393,19 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.svc, tt.args.topic = tt.setupSvc(tt.svc, tt.args.msg, tt.args.topic) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + cw := startup.NewClockSynchronizer() + opts 
:= []Option{WithClockWaiter(cw)} + svc := NewService(ctx, append(opts, tt.svcopts...)...) + svc, tt.args.topic = tt.setupSvc(svc, tt.args.msg, tt.args.topic) + go svc.Start() + if tt.clock == nil { + tt.clock = startup.NewClock(time.Now(), [32]byte{}) + } + require.NoError(t, cw.SetClock(tt.clock)) marshalledObj, err := tt.args.msg.MarshalSSZ() assert.NoError(t, err) marshalledObj = snappy.Encode(nil, marshalledObj) @@ -427,7 +417,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { ReceivedFrom: "", ValidatorData: nil, } - if got, err := tt.svc.validateBlsToExecutionChange(tt.args.ctx, tt.args.pid, msg); got != tt.want { + if got, err := svc.validateBlsToExecutionChange(ctx, tt.args.pid, msg); got != tt.want { _ = err t.Errorf("validateBlsToExecutionChange() = %v, want %v", got, tt.want) } diff --git a/beacon-chain/sync/validate_proposer_slashing_test.go b/beacon-chain/sync/validate_proposer_slashing_test.go index 9e644ce361..3aac4a4a8e 100644 --- a/beacon-chain/sync/validate_proposer_slashing_test.go +++ b/beacon-chain/sync/validate_proposer_slashing_test.go @@ -16,6 +16,7 @@ import ( coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" @@ -113,10 +114,12 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) { slashing, s := setupValidProposerSlashing(t) + chain := &mock.ChainService{State: s, Genesis: time.Now()} r := &Service{ cfg: &config{ p2p: p, - chain: &mock.ChainService{State: s, Genesis: time.Now()}, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), initialSync: &mockSync.Sync{IsSyncing: 
false}, }, seenProposerSlashingCache: lruwrpr.New(10), diff --git a/beacon-chain/sync/validate_sync_committee_message.go b/beacon-chain/sync/validate_sync_committee_message.go index 1e73ddf8ed..80a0e3c49d 100644 --- a/beacon-chain/sync/validate_sync_committee_message.go +++ b/beacon-chain/sync/validate_sync_committee_message.go @@ -72,7 +72,7 @@ func (s *Service) validateSyncCommitteeMessage( // The message's `slot` is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). if err := altair.ValidateSyncMessageTime( m.Slot, - s.cfg.chain.GenesisTime(), + s.cfg.clock.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity, ); err != nil { tracing.AnnotateError(span, err) diff --git a/beacon-chain/sync/validate_sync_committee_message_test.go b/beacon-chain/sync/validate_sync_committee_message_test.go index 571527ac42..e091829f56 100644 --- a/beacon-chain/sync/validate_sync_committee_message_test.go +++ b/beacon-chain/sync/validate_sync_committee_message_test.go @@ -19,11 +19,13 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/v4/network/forks" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/testing/assert" "github.com/prysmaticlabs/prysm/v4/testing/require" @@ -42,36 +44,33 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { } var emptySig [96]byte type args struct { - ctx context.Context pid peer.ID msg 
*ethpb.SyncCommitteeMessage topic string } tests := []struct { name string - svc *Service - setupSvc func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) + svcopts []Option + setupSvc func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) args args want pubsub.ValidationResult }{ { name: "Is syncing", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: true}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) msg.BlockRoot = headRoot[:] s.cfg.beaconDB = beaconDB s.initCaches() - return s, topic + return s, topic, startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", topic: "junk", msg: ðpb.SyncCommitteeMessage{ @@ -84,22 +83,20 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Bad Topic", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) msg.BlockRoot = headRoot[:] s.cfg.beaconDB = beaconDB s.initCaches() - return s, topic + return s, topic, 
startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", topic: "junk", msg: ðpb.SyncCommitteeMessage{ @@ -112,21 +109,19 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Future Slot Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() - return s, topic + return s, topic, startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest, 0), msg: ðpb.SyncCommitteeMessage{ @@ -139,23 +134,21 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Already Seen Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() s.setSeenSyncMessageIndexSlot(1, 1, 0) - return s, topic + return s, topic, startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", 
topic: fmt.Sprintf(defaultTopic, fakeDigest, 0), msg: ðpb.SyncCommitteeMessage{ @@ -168,28 +161,24 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Non-existent block root", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() - s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)), - } + s.cfg.chain = &mockChain.ChainService{} incorrectRoot := [32]byte{0xBB} msg.BlockRoot = incorrectRoot[:] - return s, topic + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)) + return s, topic, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: fmt.Sprintf(defaultTopic, fakeDigest, 0), msg: ðpb.SyncCommitteeMessage{ @@ -202,14 +191,13 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Subnet is non-existent", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg 
*ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() @@ -218,8 +206,6 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { assert.NoError(t, err) s.cfg.chain = &mockChain.ChainService{ SyncCommitteeIndices: []primitives.CommitteeIndex{0}, - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)), } numOfVals := hState.NumValidators() @@ -229,15 +215,16 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { msg.ValidatorIndex = primitives.ValidatorIndex(chosenVal) msg.Slot = slots.PrevSlot(hState.Slot()) - // Set Bad Topic and Subnet - digest, err := s.currentForkDigest() + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)) + vr := [32]byte{'A'} + clock := startup.NewClock(gt, vr) + digest, err := forks.CreateForkDigest(gt, vr[:]) assert.NoError(t, err) actualTopic := fmt.Sprintf(defaultTopic, digest, 5) - return s, actualTopic + return s, actualTopic, clock }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SyncCommitteeMessage{ @@ -250,24 +237,20 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Validator is non-existent", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) 
s.cfg.beaconDB = beaconDB s.initCaches() msg.BlockRoot = headRoot[:] hState, err := beaconDB.State(context.Background(), headRoot) assert.NoError(t, err) - s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)), - } + s.cfg.chain = &mockChain.ChainService{} numOfVals := hState.NumValidators() @@ -277,14 +260,15 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { msg.ValidatorIndex = primitives.ValidatorIndex(chosenVal) msg.Slot = slots.PrevSlot(hState.Slot()) - digest, err := s.currentForkDigest() + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)) + vr := [32]byte{'A'} + digest, err := forks.CreateForkDigest(gt, vr[:]) assert.NoError(t, err) - actualTopic := fmt.Sprintf(defaultTopic, digest, 1) + actualTopic := fmt.Sprintf(defaultTopic, digest, 5) - return s, actualTopic + return s, actualTopic, startup.NewClock(gt, vr) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SyncCommitteeMessage{ @@ -297,14 +281,13 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Invalid Sync Committee Signature", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() @@ -325,21 +308,20 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { 
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount s.cfg.chain = &mockChain.ChainService{ SyncCommitteeIndices: []primitives.CommitteeIndex{primitives.CommitteeIndex(subCommitteeSize)}, - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)), SyncCommitteeDomain: d, PublicKey: bytesutil.ToBytes48(keys[chosenVal].PublicKey().Marshal()), } // Set Topic and Subnet - digest, err := s.currentForkDigest() + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)) + vr := [32]byte{'A'} + digest, err := forks.CreateForkDigest(gt, vr[:]) assert.NoError(t, err) - actualTopic := fmt.Sprintf(defaultTopic, digest, 1) + actualTopic := fmt.Sprintf(defaultTopic, digest, 5) - return s, actualTopic + return s, actualTopic, startup.NewClock(gt, vr) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SyncCommitteeMessage{ @@ -352,14 +334,13 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { }, { name: "Valid Sync Committee Signature", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) { + }, + setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string, *startup.Clock) { s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New()) s.cfg.beaconDB = beaconDB s.initCaches() @@ -379,8 +360,6 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { s.cfg.chain = &mockChain.ChainService{ SyncCommitteeIndices: 
[]primitives.CommitteeIndex{primitives.CommitteeIndex(subCommitteeSize)}, - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)), SyncCommitteeDomain: d, PublicKey: bytesutil.ToBytes48(keys[chosenVal].PublicKey().Marshal()), } @@ -391,14 +370,15 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { msg.Slot = slots.PrevSlot(hState.Slot()) // Set Topic and Subnet - digest, err := s.currentForkDigest() + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)) + vr := [32]byte{'A'} + digest, err := forks.CreateForkDigest(gt, vr[:]) assert.NoError(t, err) actualTopic := fmt.Sprintf(defaultTopic, digest, 1) - return s, actualTopic + return s, actualTopic, startup.NewClock(gt, vr) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SyncCommitteeMessage{ @@ -412,7 +392,18 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.svc, tt.args.topic = tt.setupSvc(tt.svc, tt.args.msg, tt.args.topic) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + cw := startup.NewClockSynchronizer() + opts := []Option{WithClockWaiter(cw)} + svc := NewService(ctx, append(opts, tt.svcopts...)...) 
+ var clock *startup.Clock + svc, tt.args.topic, clock = tt.setupSvc(svc, tt.args.msg, tt.args.topic) + go svc.Start() + require.NoError(t, cw.SetClock(clock)) + marshalledObj, err := tt.args.msg.MarshalSSZ() assert.NoError(t, err) marshalledObj = snappy.Encode(nil, marshalledObj) @@ -424,7 +415,13 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { ReceivedFrom: "", ValidatorData: nil, } - if got, err := tt.svc.validateSyncCommitteeMessage(tt.args.ctx, tt.args.pid, msg); got != tt.want { + for i := 0; i < 10; i++ { + if !svc.chainIsStarted() { + time.Sleep(100 * time.Millisecond) + } + } + require.Equal(t, true, svc.chainIsStarted()) + if got, err := svc.validateSyncCommitteeMessage(ctx, tt.args.pid, msg); got != tt.want { _ = err t.Errorf("validateSyncCommitteeMessage() = %v, want %v", got, tt.want) } @@ -486,10 +483,8 @@ func TestService_rejectIncorrectSyncCommittee(t *testing.T) { { name: "invalid", cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{1}, - }, + chain: &mockChain.ChainService{}, + clock: startup.NewClock(time.Now(), [32]byte{1}), }, committeeIndices: []primitives.CommitteeIndex{0}, setupTopic: func(_ *Service) string { @@ -500,14 +495,13 @@ func TestService_rejectIncorrectSyncCommittee(t *testing.T) { { name: "valid", cfg: &config{ - chain: &mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{1}, - }, + chain: &mockChain.ChainService{}, + clock: startup.NewClock(time.Now(), [32]byte{1}), }, committeeIndices: []primitives.CommitteeIndex{0}, setupTopic: func(s *Service) string { format := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})] + digest, err := s.currentForkDigest() require.NoError(t, err) prefix := fmt.Sprintf(format, digest, 0 /* validator index 0 */) diff --git a/beacon-chain/sync/validate_sync_contribution_proof.go b/beacon-chain/sync/validate_sync_contribution_proof.go index bd72cee54d..99e752e88c 100644 --- 
a/beacon-chain/sync/validate_sync_contribution_proof.go +++ b/beacon-chain/sync/validate_sync_contribution_proof.go @@ -59,7 +59,7 @@ func (s *Service) validateSyncContributionAndProof(ctx context.Context, pid peer } // The contribution's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance). - if err := altair.ValidateSyncMessageTime(m.Message.Contribution.Slot, s.cfg.chain.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil { + if err := altair.ValidateSyncMessageTime(m.Message.Contribution.Slot, s.cfg.clock.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil { tracing.AnnotateError(span, err) return pubsub.ValidationIgnore, err } diff --git a/beacon-chain/sync/validate_sync_contribution_proof_test.go b/beacon-chain/sync/validate_sync_contribution_proof_test.go index 5c2144b00b..f6b8b0e10b 100644 --- a/beacon-chain/sync/validate_sync_contribution_proof_test.go +++ b/beacon-chain/sync/validate_sync_contribution_proof_test.go @@ -24,6 +24,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder" mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" @@ -51,36 +52,34 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } var emptySig [96]byte type args struct { - ctx context.Context pid peer.ID msg *ethpb.SignedContributionAndProof topic string } tests := []struct { name string - svc *Service - setupSvc func(s *Service, msg *ethpb.SignedContributionAndProof) *Service + svcopts []Option + setupSvc func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) + clock *startup.Clock args 
args want pubsub.ValidationResult }{ { name: "Is syncing", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: true}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database s.initCaches() - return s + return s, startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", topic: "junk", msg: ðpb.SignedContributionAndProof{ @@ -101,22 +100,20 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Bad Topic", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database s.initCaches() - return s + return s, startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", topic: "junk", msg: ðpb.SignedContributionAndProof{ @@ -137,21 +134,19 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Future Slot Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), 
WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database s.initCaches() - return s + return s, startup.NewClock(time.Now(), [32]byte{}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -172,29 +167,25 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Already Seen Message", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database s.initCaches() - s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), - } + s.cfg.chain = &mockChain.ChainService{} msg.Message.Contribution.BlockRoot = headRoot[:] msg.Message.Contribution.AggregationBits.SetBitAt(1, true) s.setSyncContributionIndexSlotSeen(1, 1, 1) - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: 
context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -215,29 +206,25 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Invalid Subcommittee Index", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database s.initCaches() - s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), - } + s.cfg.chain = &mockChain.ChainService{} msg.Message.Contribution.BlockRoot = headRoot[:] msg.Message.Contribution.AggregationBits.SetBitAt(1, true) msg.Message.Contribution.SubcommitteeIndex = 20 - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -258,30 +245,26 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Invalid Selection Proof", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg 
*ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database s.initCaches() - s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), - } + s.cfg.chain = &mockChain.ChainService{} msg.Message.Contribution.BlockRoot = headRoot[:] incorrectProof := [96]byte{0xBB} msg.Message.SelectionProof = incorrectProof[:] msg.Message.Contribution.AggregationBits.SetBitAt(1, true) - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -302,21 +285,17 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Invalid Aggregator", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database s.initCaches() - s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), - } + s.cfg.chain = &mockChain.ChainService{} 
msg.Message.Contribution.BlockRoot = headRoot[:] hState, err := database.State(context.Background(), headRoot) assert.NoError(t, err) @@ -340,10 +319,10 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } } msg.Message.Contribution.AggregationBits.SetBitAt(1, true) - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -364,14 +343,13 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Failed Selection Proof Verification ", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database msg.Message.Contribution.BlockRoot = headRoot[:] @@ -398,17 +376,15 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), SyncCommitteeIndices: []primitives.CommitteeIndex{primitives.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)}, } msg.Message.Contribution.AggregationBits.SetBitAt(1, true) s.initCaches() - return s + gt := 
time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -429,14 +405,13 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Invalid Proof Signature", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database s.cfg.chain = chainService @@ -477,18 +452,16 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { require.NoError(t, err) subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), SyncCommitteeIndices: []primitives.CommitteeIndex{primitives.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)}, PublicKey: bytesutil.ToBytes48(pubkey), SyncSelectionProofDomain: d, } s.initCaches() - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -509,14 
+482,13 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Invalid Sync Aggregate", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database msg.Message.Contribution.BlockRoot = headRoot[:] @@ -561,16 +533,14 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } } s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), SyncCommitteeIndices: []primitives.CommitteeIndex{1}, } s.initCaches() - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -591,14 +561,13 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Invalid Signed Sync Contribution And Proof - Zero Bits Set", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg 
*ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database @@ -642,8 +611,6 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { require.NoError(t, err) subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), SyncCommitteeIndices: []primitives.CommitteeIndex{primitives.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)}, PublicKey: bytesutil.ToBytes48(keys[msg.Message.AggregatorIndex].PublicKey().Marshal()), SyncSelectionProofDomain: d, @@ -651,10 +618,10 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { SyncCommitteeDomain: make([]byte, 32), } s.initCaches() - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -675,14 +642,13 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Valid Signed Sync Contribution And Proof - Single Bit Set", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = 
stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database @@ -737,8 +703,6 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { require.NoError(t, err) subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), SyncCommitteeIndices: []primitives.CommitteeIndex{primitives.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)}, PublicKey: bytesutil.ToBytes48(keys[msg.Message.AggregatorIndex].PublicKey().Marshal()), SyncSelectionProofDomain: pd, @@ -747,10 +711,10 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { SyncCommitteePubkeys: pubkeys, } s.initCaches() - return s + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -771,14 +735,13 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { }, { name: "Valid Signed Sync Contribution And Proof with Multiple Signatures", - svc: NewService(context.Background(), + svcopts: []Option{ WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), - ), - setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service { + }, + setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) (*Service, *startup.Clock) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) 
msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database @@ -835,8 +798,6 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { require.NoError(t, err) subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount s.cfg.chain = &mockChain.ChainService{ - ValidatorsRoot: [32]byte{'A'}, - Genesis: time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)), SyncCommitteeIndices: []primitives.CommitteeIndex{primitives.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)}, PublicKey: bytesutil.ToBytes48(keys[msg.Message.AggregatorIndex].PublicKey().Marshal()), SyncSelectionProofDomain: pd, @@ -844,12 +805,12 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { SyncCommitteeDomain: d, SyncCommitteePubkeys: pubkeys, } + gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(msg.Message.Contribution.Slot)) s.initCaches() - return s + return s, startup.NewClock(gt, [32]byte{'A'}) }, args: args{ - ctx: context.Background(), pid: "random", topic: defaultTopic, msg: ðpb.SignedContributionAndProof{ @@ -871,7 +832,15 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.svc = tt.setupSvc(tt.svc, tt.args.msg) + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + cw := startup.NewClockSynchronizer() + svc := NewService(ctx, append([]Option{WithClockWaiter(cw)}, tt.svcopts...)...) 
+ var clock *startup.Clock + svc, clock = tt.setupSvc(svc, tt.args.msg) + require.NoError(t, cw.SetClock(clock)) + go svc.Start() marshalledObj, err := tt.args.msg.MarshalSSZ() assert.NoError(t, err) marshalledObj = snappy.Encode(nil, marshalledObj) @@ -883,7 +852,16 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { ReceivedFrom: "", ValidatorData: nil, } - if got, err := tt.svc.validateSyncContributionAndProof(tt.args.ctx, tt.args.pid, msg); got != tt.want { + // a lot happens in the chain service after SetClock is called, + // give it a moment before calling internal methods that would typically + // only execute after waitFor + for i := 0; i < 10; i++ { + if !svc.chainIsStarted() { + time.Sleep(100 * time.Millisecond) + } + } + require.Equal(t, true, svc.chainIsStarted()) + if got, err := svc.validateSyncContributionAndProof(ctx, tt.args.pid, msg); got != tt.want { _ = err t.Errorf("validateSyncContributionAndProof() = %v, want %v", got, tt.want) } @@ -922,9 +900,9 @@ func TestValidateSyncContributionAndProof(t *testing.T) { WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), - WithStateNotifier(chainService.StateNotifier()), WithOperationNotifier(chainService.OperationNotifier()), ) + s.cfg.clock = startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot) go s.verifierRoutine() s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] @@ -989,6 +967,7 @@ func TestValidateSyncContributionAndProof(t *testing.T) { SyncCommitteeDomain: d, SyncCommitteePubkeys: pubkeys, } + s.cfg.clock = startup.NewClock(s.cfg.chain.GenesisTime(), s.cfg.chain.GenesisValidatorsRoot()) s.initCaches() marshalledObj, err := msg.MarshalSSZ() @@ -1065,11 +1044,7 @@ func syncSelectionProofSigningRoot(st state.BeaconState, slot primitives.Slot, c } func TestService_setSyncContributionIndexSlotSeen(t *testing.T) { - chainService := 
&mockChain.ChainService{ - Genesis: time.Now(), - ValidatorsRoot: [32]byte{'A'}, - } - s := NewService(context.Background(), WithP2P(mockp2p.NewTestP2P(t)), WithStateNotifier(chainService.StateNotifier())) + s := NewService(context.Background(), WithP2P(mockp2p.NewTestP2P(t))) s.initCaches() // Empty cache diff --git a/beacon-chain/sync/validate_voluntary_exit_test.go b/beacon-chain/sync/validate_voluntary_exit_test.go index 4233534248..828b0a1c85 100644 --- a/beacon-chain/sync/validate_voluntary_exit_test.go +++ b/beacon-chain/sync/validate_voluntary_exit_test.go @@ -17,6 +17,7 @@ import ( coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing" @@ -76,13 +77,15 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) { exit, s := setupValidExit(t) + gt := time.Now() r := &Service{ cfg: &config{ p2p: p, chain: &mock.ChainService{ State: s, - Genesis: time.Now(), + Genesis: gt, }, + clock: startup.NewClock(gt, [32]byte{}), initialSync: &mockSync.Sync{IsSyncing: false}, operationNotifier: (&mock.ChainService{}).OperationNotifier(), }, diff --git a/cmd/prysmctl/testnet/generate_genesis.go b/cmd/prysmctl/testnet/generate_genesis.go index dc3b15502e..7c1589f174 100644 --- a/cmd/prysmctl/testnet/generate_genesis.go +++ b/cmd/prysmctl/testnet/generate_genesis.go @@ -263,6 +263,9 @@ func generateGenesis(ctx context.Context) (state.BeaconState, error) { // set timestamps for genesis and shanghai fork gen.Timestamp = f.GenesisTime gen.Config.ShanghaiTime = interop.GethShanghaiTime(f.GenesisTime, params.BeaconConfig()) + log. 
+ WithField("shanghai", gen.Config.ShanghaiTime). + Info("setting fork geth times") if v > version.Altair { // set ttd to zero so EL goes post-merge immediately gen.Config.TerminalTotalDifficulty = big.NewInt(0) diff --git a/runtime/interop/BUILD.bazel b/runtime/interop/BUILD.bazel index b240cba3fe..ee629e8c09 100644 --- a/runtime/interop/BUILD.bazel +++ b/runtime/interop/BUILD.bazel @@ -25,6 +25,7 @@ go_library( "//config/fieldparams:go_default_library", "//config/params:go_default_library", "//consensus-types/blocks:go_default_library", + "//consensus-types/interfaces:go_default_library", "//container/trie:go_default_library", "//crypto/bls:go_default_library", "//crypto/hash:go_default_library", diff --git a/runtime/interop/premine-state.go b/runtime/interop/premine-state.go index 62117b68ec..38624c2e6e 100644 --- a/runtime/interop/premine-state.go +++ b/runtime/interop/premine-state.go @@ -14,6 +14,7 @@ import ( fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" "github.com/prysmaticlabs/prysm/v4/container/trie" "github.com/prysmaticlabs/prysm/v4/crypto/bls" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" @@ -66,7 +67,7 @@ func NewPreminedGenesis(ctx context.Context, t, nvals, pCreds uint64, version in func (s *PremineGenesisConfig) prepare(ctx context.Context) (state.BeaconState, error) { switch s.Version { - case version.Phase0, version.Altair, version.Bellatrix: + case version.Phase0, version.Altair, version.Bellatrix, version.Capella: default: return nil, errors.Wrapf(errUnsupportedVersion, "version=%s", version.String(s.Version)) } @@ -104,6 +105,11 @@ func (s *PremineGenesisConfig) empty() (state.BeaconState, error) { if err != nil { return nil, err } + case version.Capella: + e, err = state_native.InitializeFromProtoCapella(ðpb.BeaconStateCapella{}) + if err != 
nil { + return nil, err + } default: return nil, errUnsupportedVersion } @@ -279,6 +285,8 @@ func (s *PremineGenesisConfig) setFork(g state.BeaconState) error { pv, cv = params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().AltairForkVersion case version.Bellatrix: pv, cv = params.BeaconConfig().AltairForkVersion, params.BeaconConfig().BellatrixForkVersion + case version.Capella: + pv, cv = params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().CapellaForkVersion default: return errUnsupportedVersion } @@ -375,6 +383,32 @@ func (s *PremineGenesisConfig) setLatestBlockHeader(g state.BeaconState) error { Transactions: make([][]byte, 0), }, } + case version.Capella: + body = ðpb.BeaconBlockBodyCapella{ + RandaoReveal: make([]byte, 96), + Eth1Data: ðpb.Eth1Data{ + DepositRoot: make([]byte, 32), + BlockHash: make([]byte, 32), + }, + Graffiti: make([]byte, 32), + SyncAggregate: ðpb.SyncAggregate{ + SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8), + SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength), + }, + ExecutionPayload: &enginev1.ExecutionPayloadCapella{ + ParentHash: make([]byte, 32), + FeeRecipient: make([]byte, 20), + StateRoot: make([]byte, 32), + ReceiptsRoot: make([]byte, 32), + LogsBloom: make([]byte, 256), + PrevRandao: make([]byte, 32), + BaseFeePerGas: make([]byte, 32), + BlockHash: make([]byte, 32), + Transactions: make([][]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), + }, + BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0), + } default: return errUnsupportedVersion } @@ -392,40 +426,73 @@ func (s *PremineGenesisConfig) setLatestBlockHeader(g state.BeaconState) error { } func (s *PremineGenesisConfig) setExecutionPayload(g state.BeaconState) error { - if s.Version < version.Bellatrix { - return nil - } gb := s.GB - payload := &enginev1.ExecutionPayload{ - ParentHash: gb.ParentHash().Bytes(), - FeeRecipient: gb.Coinbase().Bytes(), - StateRoot: gb.Root().Bytes(), - 
ReceiptsRoot: gb.ReceiptHash().Bytes(), - LogsBloom: gb.Bloom().Bytes(), - PrevRandao: params.BeaconConfig().ZeroHash[:], - BlockNumber: gb.NumberU64(), - GasLimit: gb.GasLimit(), - GasUsed: gb.GasUsed(), - Timestamp: gb.Time(), - ExtraData: gb.Extra()[:32], - BaseFeePerGas: bytesutil.PadTo(bytesutil.ReverseByteOrder(gb.BaseFee().Bytes()), fieldparams.RootLength), - BlockHash: gb.Hash().Bytes(), - Transactions: make([][]byte, 0), + var ed interfaces.ExecutionData + switch s.Version { + case version.Bellatrix: + payload := &enginev1.ExecutionPayload{ + ParentHash: gb.ParentHash().Bytes(), + FeeRecipient: gb.Coinbase().Bytes(), + StateRoot: gb.Root().Bytes(), + ReceiptsRoot: gb.ReceiptHash().Bytes(), + LogsBloom: gb.Bloom().Bytes(), + PrevRandao: params.BeaconConfig().ZeroHash[:], + BlockNumber: gb.NumberU64(), + GasLimit: gb.GasLimit(), + GasUsed: gb.GasUsed(), + Timestamp: gb.Time(), + ExtraData: gb.Extra()[:32], + BaseFeePerGas: bytesutil.PadTo(bytesutil.ReverseByteOrder(gb.BaseFee().Bytes()), fieldparams.RootLength), + BlockHash: gb.Hash().Bytes(), + Transactions: make([][]byte, 0), + } + wep, err := blocks.WrappedExecutionPayload(payload) + if err != nil { + return err + } + eph, err := blocks.PayloadToHeader(wep) + if err != nil { + return err + } + ed, err = blocks.WrappedExecutionPayloadHeader(eph) + if err != nil { + return err + } + case version.Capella: + payload := &enginev1.ExecutionPayloadCapella{ + ParentHash: gb.ParentHash().Bytes(), + FeeRecipient: gb.Coinbase().Bytes(), + StateRoot: gb.Root().Bytes(), + ReceiptsRoot: gb.ReceiptHash().Bytes(), + LogsBloom: gb.Bloom().Bytes(), + PrevRandao: params.BeaconConfig().ZeroHash[:], + BlockNumber: gb.NumberU64(), + GasLimit: gb.GasLimit(), + GasUsed: gb.GasUsed(), + Timestamp: gb.Time(), + ExtraData: gb.Extra()[:32], + BaseFeePerGas: bytesutil.PadTo(bytesutil.ReverseByteOrder(gb.BaseFee().Bytes()), fieldparams.RootLength), + BlockHash: gb.Hash().Bytes(), + Transactions: make([][]byte, 0), + Withdrawals: 
make([]*enginev1.Withdrawal, 0), + } + wep, err := blocks.WrappedExecutionPayloadCapella(payload, 0) + if err != nil { + return err + } + eph, err := blocks.PayloadToHeaderCapella(wep) + if err != nil { + return err + } + ed, err = blocks.WrappedExecutionPayloadHeaderCapella(eph, 0) + if err != nil { + return err + } + default: + return nil } - wep, err := blocks.WrappedExecutionPayload(payload) - if err != nil { - return err - } - eph, err := blocks.PayloadToHeader(wep) - if err != nil { - return err - } - wh, err := blocks.WrappedExecutionPayloadHeader(eph) - if err != nil { - return err - } - return g.SetLatestExecutionPayloadHeader(wh) + return g.SetLatestExecutionPayloadHeader(ed) } func nZeroRoots(n uint64) [][]byte { diff --git a/testing/endtoend/BUILD.bazel b/testing/endtoend/BUILD.bazel index 73acb72801..897eee7038 100644 --- a/testing/endtoend/BUILD.bazel +++ b/testing/endtoend/BUILD.bazel @@ -48,6 +48,7 @@ common_deps = [ "//beacon-chain/core/transition:go_default_library", "//beacon-chain/db/testing:go_default_library", "//beacon-chain/operations/slashings/mock:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state/stategen/mock:go_default_library", "//config/params:go_default_library", "//consensus-types/primitives:go_default_library", diff --git a/testing/endtoend/slasher_simulator_e2e_test.go b/testing/endtoend/slasher_simulator_e2e_test.go index 87baea5c67..1b051f5e7f 100644 --- a/testing/endtoend/slasher_simulator_e2e_test.go +++ b/testing/endtoend/slasher_simulator_e2e_test.go @@ -9,6 +9,7 @@ import ( mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" mockslashings "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" mockstategen "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen/mock" "github.com/prysmaticlabs/prysm/v4/config/params" 
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -87,6 +88,7 @@ func TestEndToEnd_SlasherSimulator(t *testing.T) { gen := mockstategen.NewMockService() gen.AddStateForRoot(beaconState, [32]byte{}) + gs := startup.NewClockSynchronizer() sim, err := slashersimulator.New(ctx, &slashersimulator.ServiceConfig{ Params: simulatorParams, Database: slasherDB, @@ -97,6 +99,8 @@ func TestEndToEnd_SlasherSimulator(t *testing.T) { PrivateKeysByValidatorIndex: privKeys, SlashingsPool: &mockslashings.PoolMock{}, SyncChecker: mockSyncChecker{}, + ClockWaiter: gs, + ClockSetter: gs, }) require.NoError(t, err) sim.Start() diff --git a/testing/slasher/simulator/BUILD.bazel b/testing/slasher/simulator/BUILD.bazel index be1c40c5f5..b77a58618a 100644 --- a/testing/slasher/simulator/BUILD.bazel +++ b/testing/slasher/simulator/BUILD.bazel @@ -15,13 +15,13 @@ go_library( deps = [ "//async/event:go_default_library", "//beacon-chain/blockchain:go_default_library", - "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/signing:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/operations/slashings:go_default_library", "//beacon-chain/slasher:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync:go_default_library", diff --git a/testing/slasher/simulator/simulator.go b/testing/slasher/simulator/simulator.go index b4ff3106ca..dd6aed68e1 100644 --- a/testing/slasher/simulator/simulator.go +++ b/testing/slasher/simulator/simulator.go @@ -7,11 +7,11 @@ import ( "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings" "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -35,6 +35,8 @@ type ServiceConfig struct { SlashingsPool slashings.PoolManager PrivateKeysByValidatorIndex map[primitives.ValidatorIndex]bls.SecretKey SyncChecker sync.Checker + ClockWaiter startup.ClockWaiter + ClockSetter startup.ClockSetter } // Parameters for a slasher simulator. @@ -94,6 +96,7 @@ func New(ctx context.Context, srvConfig *ServiceConfig) (*Simulator, error) { StateGen: srvConfig.StateGen, SlashingPoolInserter: srvConfig.SlashingsPool, SyncChecker: srvConfig.SyncChecker, + ClockWaiter: srvConfig.ClockWaiter, }) if err != nil { return nil, err @@ -142,10 +145,10 @@ func (s *Simulator) Start() { // for slasher to pick up a genesis time. time.Sleep(time.Second) s.genesisTime = time.Now() - s.srvConfig.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.Initialized, - Data: &statefeed.InitializedData{StartTime: s.genesisTime}, - }) + var vr [32]byte + if err := s.srvConfig.ClockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil { + panic(err) + } // We simulate blocks and attestations for N epochs. 
s.simulateBlocksAndAttestations(s.ctx) diff --git a/testing/spectest/shared/common/forkchoice/BUILD.bazel b/testing/spectest/shared/common/forkchoice/BUILD.bazel index 5a5ea8400b..30b86a9102 100644 --- a/testing/spectest/shared/common/forkchoice/BUILD.bazel +++ b/testing/spectest/shared/common/forkchoice/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "//beacon-chain/execution:go_default_library", "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library", "//beacon-chain/operations/attestations:go_default_library", + "//beacon-chain/startup:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/state-native:go_default_library", "//beacon-chain/state/stategen:go_default_library", diff --git a/testing/spectest/shared/common/forkchoice/service.go b/testing/spectest/shared/common/forkchoice/service.go index ee92067a30..c001bd1bab 100644 --- a/testing/spectest/shared/common/forkchoice/service.go +++ b/testing/spectest/shared/common/forkchoice/service.go @@ -15,6 +15,7 @@ import ( testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" @@ -65,6 +66,7 @@ func startChainService(t testing.TB, blockchain.WithAttestationPool(attestations.NewPool()), blockchain.WithDepositCache(depositCache), blockchain.WithProposerIdsCache(cache.NewProposerPayloadIDsCache()), + blockchain.WithClockSynchronizer(startup.NewClockSynchronizer()), ) service, err := blockchain.NewService(context.Background(), opts...) 
require.NoError(t, err) diff --git a/time/slots/slottime.go b/time/slots/slottime.go index 2c70215d72..63e482f979 100644 --- a/time/slots/slottime.go +++ b/time/slots/slottime.go @@ -173,6 +173,14 @@ func CurrentSlot(genesisTimeSec uint64) primitives.Slot { return primitives.Slot((now - genesisTimeSec) / params.BeaconConfig().SecondsPerSlot) } +// Duration computes the span of time between two instants, represented as Slots. +func Duration(start, end time.Time) primitives.Slot { + if end.Before(start) { + return 0 + } + return primitives.Slot(uint64(end.Unix()-start.Unix()) / params.BeaconConfig().SecondsPerSlot) +} + // ValidateClock validates a provided slot against the local // clock to ensure slots that are unreasonable are returned with // an error. @@ -233,7 +241,7 @@ func SyncCommitteePeriodStartEpoch(e primitives.Epoch) (primitives.Epoch, error) // SecondsSinceSlotStart returns the number of seconds transcurred since the // given slot start time -func SecondsSinceSlotStart(s primitives.Slot, genesisTime uint64, timeStamp uint64) (uint64, error) { +func SecondsSinceSlotStart(s primitives.Slot, genesisTime, timeStamp uint64) (uint64, error) { if timeStamp < genesisTime+uint64(s)*params.BeaconConfig().SecondsPerSlot { return 0, errors.New("could not compute seconds since slot start: invalid timestamp") } diff --git a/time/slots/slottime_test.go b/time/slots/slottime_test.go index dd49b160c9..44bc0ea3ee 100644 --- a/time/slots/slottime_test.go +++ b/time/slots/slottime_test.go @@ -491,3 +491,75 @@ func TestSecondsSinceSlotStart(t *testing.T) { } } } + +func TestDuration(t *testing.T) { + oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second + cases := []struct { + name string + start time.Time + endDelta time.Duration + expected primitives.Slot + }{ + { + name: "end before start", + start: time.Now(), + endDelta: -64 * time.Second, + expected: 0, + }, + { + name: "end equals start", + start: time.Now(), + endDelta: 0, + expected: 0, + }, 
+ { + name: "one slot apart", + start: time.Now(), + endDelta: oneSlot, + expected: 1, + }, + { + name: "same slot", + start: time.Now(), + endDelta: time.Second, + expected: 0, + }, + { + name: "don't round up", + start: time.Now(), + endDelta: oneSlot - time.Second, + expected: 0, + }, + { + name: "don't round up pt 2", + start: time.Now(), + endDelta: 2*oneSlot - time.Second, + expected: 1, + }, + { + name: "2 slots", + start: time.Now(), + endDelta: 2 * oneSlot, + expected: 2, + }, + { + name: "1 epoch", + start: time.Now(), + endDelta: time.Duration(params.BeaconConfig().SlotsPerEpoch) * oneSlot, + expected: params.BeaconConfig().SlotsPerEpoch, + }, + { + name: "1 epoch and change", + start: time.Now(), + endDelta: oneSlot + time.Second + time.Duration(params.BeaconConfig().SlotsPerEpoch)*oneSlot, + expected: params.BeaconConfig().SlotsPerEpoch + 1, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + end := c.start.Add(c.endDelta) + a := Duration(c.start, end) + require.Equal(t, c.expected, a) + }) + } +}