Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-08 07:03:58 -05:00
Replace statefeed Initialize (#12285)
* refactor initialization to blocking startup method
* require genesisSetter in blockchain, fix tests
* work-around gazelle weirdness
* fix dep gazelle ignores
* only call SetGenesis once
* fix typo
* validator test setup and fix to return right error
* move waitForChainStart to Start
* wire up sync Service.genesisWaiter
* fix p2p genesisWaiter plumbing
* remove extra clock type, integrate into genesis and rename
* use time.Now when no Nower is specified
* remove unused ClockSetter
* simplify rpc context checking
* fix typo
* use clock everywhere in sync; [32]byte val root
* don't use DeepEqual to compare [32]byte and []byte
* don't use clock in init sync, not wired up yet
* use clock waiter in blockchain as well
* use cancelable contexts in tests with goroutines
* missed a reference to WithClockSetter
* Update beacon-chain/startup/genesis.go

  Co-authored-by: Radosław Kapka <rkapka@wp.pl>
* Update beacon-chain/blockchain/service_test.go

  Co-authored-by: Radosław Kapka <rkapka@wp.pl>
* more clear docs
* doc for NewClock
* move clock typedef to more logical file name
* adding documentation
* gaz
* fixes for capella
* reducing test raciness
* fix races in committee cache tests
* lint
* add tests on Duration slot math helper
* startup package test coverage
* fix bad merge
* set non-zero genesis time in tests that call Start
* happy deepsource, happy me-epsource
* replace Synced event with channel
* remove unused error
* remove accidental wip commit
* gaz!
* remove unused event constants
* remove sync statefeed subscription to fix deadlock
* remove state notifier
* fix build

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
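The change this commit makes everywhere is visible in the hunks below: services no longer subscribe to the state feed and wait for an Initialized/Synced event; they block on a startup.ClockSynchronizer until genesis data is known. A minimal, self-contained sketch of that pattern follows (the Clock fields and SetClock body here are illustrative assumptions; NewClockSynchronizer, WaitForClock, and the setter/waiter split do appear in the diff itself):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Clock pairs the genesis time with the genesis validators root (assumed fields).
type Clock struct {
	GenesisTime    time.Time
	GenesisValRoot [32]byte
}

// ClockSynchronizer replaces the old statefeed "Initialized" event: one
// component sets the clock once, every other service blocks until it exists.
type ClockSynchronizer struct {
	ready chan struct{}
	clock *Clock
}

func NewClockSynchronizer() *ClockSynchronizer {
	return &ClockSynchronizer{ready: make(chan struct{})}
}

// SetClock is called once, by the component that learns the genesis data.
func (cs *ClockSynchronizer) SetClock(c *Clock) error {
	cs.clock = c
	close(cs.ready) // unblock all waiters
	return nil
}

// WaitForClock blocks until SetClock runs or the context is canceled.
func (cs *ClockSynchronizer) WaitForClock(ctx context.Context) (*Clock, error) {
	select {
	case <-cs.ready:
		return cs.clock, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	cs := NewClockSynchronizer()
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend genesis was just detected
		_ = cs.SetClock(&Clock{GenesisTime: time.Now()})
	}()
	c, err := cs.WaitForClock(context.Background())
	fmt.Println(c.GenesisTime, err)
}
```

Because the same value satisfies both the setter and waiter roles, a single constructor call can be threaded through every service option, which is exactly what the new test helpers below do.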
@@ -58,6 +58,7 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
@@ -119,6 +120,7 @@ go_test(
"receive_attestation_test.go",
"receive_block_test.go",
"service_test.go",
"setup_test.go",
"weak_subjectivity_checks_test.go",
],
embed = [":go_default_library"],
@@ -168,6 +170,7 @@ go_test(
"mock_test.go",
"receive_block_test.go",
"service_norace_test.go",
"setup_test.go",
],
embed = [":go_default_library"],
gc_goopts = [

@@ -85,6 +85,12 @@ type ForkFetcher interface {
TimeFetcher
}

// TemporalOracle is like ForkFetcher minus CurrentFork()
type TemporalOracle interface {
GenesisFetcher
TimeFetcher
}
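A sketch of what a TemporalOracle consumer looks like; the GenesisFetcher/TimeFetcher method sets below are assumptions for illustration (only the interface composition comes from the hunk above, and the real CurrentSlot returns a typed Slot rather than uint64):

```go
package main

import (
	"fmt"
	"time"
)

// Assumed shapes of the composed interfaces.
type TimeFetcher interface {
	GenesisTime() time.Time
	CurrentSlot() uint64
}

type GenesisFetcher interface {
	GenesisValidatorsRoot() [32]byte
}

// TemporalOracle answers "what time is it on the beacon chain" without
// carrying any fork-schedule knowledge (no CurrentFork()).
type TemporalOracle interface {
	GenesisFetcher
	TimeFetcher
}

// fixedOracle is a toy implementation for the demo below.
type fixedOracle struct{ genesis time.Time }

func (f fixedOracle) GenesisTime() time.Time { return f.genesis }
func (f fixedOracle) CurrentSlot() uint64 {
	return uint64(time.Since(f.genesis) / (12 * time.Second)) // 12s mainnet slots
}
func (f fixedOracle) GenesisValidatorsRoot() [32]byte { return [32]byte{} }

func describe(o TemporalOracle) string {
	return fmt.Sprintf("slot %d since genesis %s", o.CurrentSlot(), o.GenesisTime().UTC())
}

func main() {
	fmt.Println(describe(fixedOracle{genesis: time.Now().Add(-24 * time.Second)})) // slot 2
}
```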

// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
@@ -327,7 +333,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
}

// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
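The new guard returns false for any slot before the Bellatrix fork, since no head can be optimistic pre-merge. The epoch math behind slots.ToEpoch is plain integer division; a sketch using mainnet constants (32 slots per epoch, Bellatrix fork epoch 144896 — both well-known mainnet values, not taken from this diff):

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value

// toEpoch mirrors the slot -> epoch conversion: integer division.
func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
	bellatrixForkEpoch := uint64(144896) // mainnet Bellatrix fork epoch
	slot := uint64(4000000)              // epoch 125000
	fmt.Println(toEpoch(slot) < bellatrixForkEpoch) // true: pre-merge, never optimistic
}
```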

@@ -10,7 +10,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -72,16 +71,8 @@ func TestHeadRoot_Nil(t *testing.T) {
}

func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
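Most of the test churn from here on follows this one pattern: delete the hand-rolled testDB.SetupDB / doublylinkedtree.New() / NewService preamble and call minimalTestService, destructuring only the resources the test touches. A hypothetical sketch of the helper's shape (the field names ctx, db, fcs, dc mirror how the diff destructures tr; everything else, including the package name, is an assumption):

```go
// Package blockchaintest is illustrative only; the real helper lives in the
// blockchain package's setup_test.go.
package blockchaintest

import (
	"context"
	"testing"
)

// Stand-ins for the real dependencies, which the actual helper builds via
// testDB.SetupDB(t), doublylinkedtree.New() and depositcache.New().
type (
	beaconDB     struct{}
	forkchoice   struct{}
	depositCache struct{}
	Service      struct{}
	Option       func(*Service) error
)

// testResources bundles what each test previously constructed by hand.
type testResources struct {
	ctx context.Context
	db  *beaconDB
	fcs *forkchoice
	dc  *depositCache
}

// minimalTestService builds a Service with the standard options plus any
// test-specific extras, and returns the shared resources so each test can
// pull out only what it needs.
func minimalTestService(t *testing.T, extra ...Option) (*Service, *testResources) {
	t.Helper()
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel) // cancelable ctx avoids goroutine leaks, per the commit notes
	tr := &testResources{ctx: ctx, db: &beaconDB{}, fcs: &forkchoice{}, dc: &depositCache{}}
	svc := &Service{}
	for _, opt := range extra {
		if err := opt(svc); err != nil {
			t.Fatal(err)
		}
	}
	return svc, tr
}
```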
@@ -97,16 +88,8 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
}

func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

jroot := [32]byte{'j'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
@@ -120,16 +103,8 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
}

func TestFinalizedBlockHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

r := [32]byte{'f'}
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
@@ -170,16 +145,9 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
}

func TestHeadRoot_CanRetrieve(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

@@ -189,16 +157,8 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
}

func TestHeadRoot_UseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fcs),
WithStateGen(stategen.New(beaconDB, fcs)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

service.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()

@@ -9,14 +9,11 @@ import (
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
bstate "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -33,23 +30,16 @@ import (
)

func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -96,23 +86,15 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
}

func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -264,8 +246,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}

func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -297,12 +279,6 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)

// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -358,8 +334,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.

func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
@@ -414,12 +390,6 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)

// Insert blocks into forkchoice
service := setupBeaconChain(t, beaconDB)
fcs := doublylinkedtree.New()
service.cfg.ForkChoiceStore = fcs
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{50, 100, 200}, nil })
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
@@ -497,15 +467,9 @@ func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx, fcs := tr.ctx, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -536,8 +500,6 @@ func Test_NotifyNewPayload(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
r, err := bellatrixBlk.Block().HashTreeRoot()
@@ -744,14 +706,10 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}

service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
@@ -764,8 +722,6 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{
@@ -788,17 +744,9 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
}

func Test_GetPayloadAttribute(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
@@ -826,22 +774,15 @@ func Test_GetPayloadAttribute(t *testing.T) {
}

func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()

beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

hook := logTest.NewGlobal()
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, hasPayload)
@@ -851,17 +792,9 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
}

func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
@@ -897,18 +830,9 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fcs)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
@@ -1013,16 +937,8 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}

func TestService_removeInvalidBlockAndState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

// Deleting unknown block should not error.
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
@@ -1066,18 +982,10 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
}

func TestService_getPayloadHash(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

_, err = service.getPayloadHash(ctx, []byte{})
_, err := service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)

b := util.NewBeaconBlock()

@@ -8,8 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -145,23 +143,15 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
}

func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
@@ -200,18 +190,10 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin

func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs

service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
require.NoError(t, err)
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}

@@ -9,10 +9,8 @@ import (

mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -581,18 +579,9 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
}

func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx := context.Background()
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}

service, err := NewService(ctx, opts...)
require.NoError(t, err)
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)

@@ -5,16 +5,19 @@ import (

testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
)

func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
cs := startup.NewClockSynchronizer()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithClockSynchronizer(cs),
}
}

@@ -22,5 +25,6 @@ func testServiceOptsWithDB(t *testing.T) []Option {
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
return []Option{}
cs := startup.NewClockSynchronizer()
return []Option{WithClockSynchronizer(cs)}
}

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -163,3 +164,11 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
return nil
}
}

func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return func(s *Service) error {
s.clockSetter = gs
s.clockWaiter = gs
return nil
}
}
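WithClockSynchronizer wires the same object in as both setter and waiter. It follows the functional-option shape used throughout this package; a self-contained sketch of that shape (the names below shadow the real ones for illustration only):

```go
package main

import "fmt"

type ClockSynchronizer struct{}

type Service struct {
	clockSetter *ClockSynchronizer // the same value satisfies both roles
	clockWaiter *ClockSynchronizer
}

// Option mutates the Service during construction and may fail.
type Option func(*Service) error

func WithClockSynchronizer(gs *ClockSynchronizer) Option {
	return func(s *Service) error {
		s.clockSetter = gs
		s.clockWaiter = gs
		return nil
	}
}

// NewService applies the options in order, aborting on the first error.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := NewService(WithClockSynchronizer(&ClockSynchronizer{}))
	fmt.Println(s.clockSetter == s.clockWaiter, err) // true <nil>
}
```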

@@ -1,17 +1,13 @@

package blockchain

import (
"context"
"fmt"
"math/big"
"testing"

gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -108,16 +104,8 @@ func Test_validateMergeBlock(t *testing.T) {
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -158,16 +146,8 @@ func Test_validateMergeBlock(t *testing.T) {
}

func Test_getBlkParentHashAndTD(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
@@ -239,14 +219,9 @@ func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, ok)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.SetSlot(1)

@@ -6,9 +6,6 @@ import (
"time"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -21,19 +18,10 @@ import (
)

func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

_, err = blockTree1(t, beaconDB, []byte{'g'})
_, err := blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)

blkWithoutState := util.NewBeaconBlock()
@@ -128,17 +116,9 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}

func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx := tr.ctx

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -158,15 +138,8 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
}

func TestStore_SaveCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

s, err := util.NewBeaconState()
require.NoError(t, err)
@@ -220,15 +193,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
}

func TestStore_UpdateCheckpointState(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

epoch := primitives.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)

@@ -6,7 +6,6 @@ import (
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -652,28 +651,21 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion

// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
func (s *Service) spawnLateBlockTasksLoop() {
go func() {
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}

attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(ctx)
s.lateBlockTasks(s.ctx)

case <-ctx.Done():
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
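The loop above replaces a state-feed subscription with a single blocking WaitForClock call, then ticks once per slot at the attestation threshold (a third of the slot, i.e. the point by which a block should have arrived, so anything still missing is "late"). A standalone sketch of such an offset ticker; NewSlotTickerWithOffset is the real helper, while this version is an assumption-laden stand-in built from plain time arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

// slotTickerWithOffset fires once per slot, `offset` into the slot, measured
// from the genesis time.
func slotTickerWithOffset(genesis time.Time, offset, slotDuration time.Duration) <-chan time.Time {
	c := make(chan time.Time, 1)
	go func() {
		for slot := uint64(0); ; slot++ {
			target := genesis.Add(time.Duration(slot)*slotDuration + offset)
			if d := time.Until(target); d > 0 {
				time.Sleep(d)
			} else {
				continue // already past this slot's offset; catch up silently
			}
			c <- time.Now()
		}
	}()
	return c
}

func main() {
	secondsPerSlot := 12 * time.Second // mainnet
	attThreshold := secondsPerSlot / 3 // 4s into each slot, the attestation deadline
	tick := slotTickerWithOffset(time.Now(), attThreshold, secondsPerSlot)
	fmt.Println((<-tick).Truncate(time.Second)) // fires ~4s after "genesis"
}
```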

@@ -12,9 +12,7 @@ import (
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
@@ -24,9 +22,7 @@ import (
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -46,18 +42,9 @@ import (
)

func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
service, tr := minimalTestService(t)
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}

service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesis)
@@ -152,17 +139,8 @@ func TestStore_OnBlock(t *testing.T) {
}

func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -185,7 +163,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
@@ -196,17 +174,9 @@ func TestStore_OnBlockBatch(t *testing.T) {
}

func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, tr := minimalTestService(t)
ctx := tr.ctx

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
@@ -227,22 +197,12 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
}
err = service.onBlockBatch(ctx, blks, blkRoots)
require.NoError(t, err)
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
}

func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -260,16 +220,8 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -309,16 +261,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
@@ -360,16 +304,8 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -418,17 +354,8 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
}

func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New()
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

var genesisStateRoot [32]byte
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -566,17 +493,8 @@ func TestAncestorByDB_CtxErr(t *testing.T) {
}

func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
beaconDB := tr.db

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -657,17 +575,8 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
}

func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db

b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
@@ -732,21 +641,8 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
}

func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -782,21 +678,8 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
}

func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -830,39 +713,15 @@ func TestOnBlock_CanFinalize(t *testing.T) {
}

func TestOnBlock_NilBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)

err = service.onBlock(ctx, nil, [32]byte{})
err := service.onBlock(tr.ctx, nil, [32]byte{})
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -885,21 +744,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -918,13 +764,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
}

func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -952,13 +793,8 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}

func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx, depositCache := tr.ctx, tr.dc

gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -1085,18 +921,8 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash
params.OverrideBeaconConfig(cfg)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx

aHash := common.BytesToHash([]byte("a"))
bHash := common.BytesToHash([]byte("b"))
@@ -1223,17 +1049,8 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
}

func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

beaconState, privKeys := util.DeterministicGenesisState(t, 100)
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
@@ -1274,21 +1091,8 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
}

func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
WithAttestationPool(attestations.NewPool()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
@@ -1353,17 +1157,8 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
}

func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service, _ := minimalTestService(t)

fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
wb, err := consensusblocks.NewBeaconBlock(blk)
@@ -1386,22 +1181,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1546,22 +1328,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1707,22 +1476,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -1915,27 +1671,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
newfc := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, newfc)
newfc.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(newStateGen),
WithForkChoiceStore(newfc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithExecutionEngineCaller(mockEngine),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithAttestationService(attSrv),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
@@ -2084,18 +1822,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
}

func TestOnBlock_HandleBlockAttestations(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
@@ -2155,18 +1883,8 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {

func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
logHook := logTest.NewGlobal()
fc := doublylinkedtree.New()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.lateBlockTasks(ctx)
service, tr := minimalTestService(t)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}

@@ -2177,24 +1895,14 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
})
defer resetCfg()

fc := doublylinkedtree.New()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

opts := []Option{
WithForkChoiceStore(fc),
WithStateGen(stategen.New(beaconDB, fc)),
}

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.lateBlockTasks(ctx)
service, tr := minimalTestService(t)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
}
|
||||
|
||||
// Helper function to simulate the block being on time or delayed for proposer
|
||||
// boost. It alters the genesisTime tracked by the store.
|
||||
func driftGenesisTime(s *Service, slot int64, delay int64) {
|
||||
func driftGenesisTime(s *Service, slot, delay int64) {
|
||||
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
|
||||
s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
|
||||
}
|
||||
|
||||
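To make the slot arithmetic in driftGenesisTime concrete: with delay = 0 the shifted genesis puts the current moment exactly at the start of the given slot, and a positive delay subtracts that many seconds from the elapsed time. A minimal standalone sketch of the same computation (the 12-second slot duration is an assumed mainnet value, not taken from this diff):

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // assumption: mainnet SecondsPerSlot

// shiftedGenesis mirrors driftGenesisTime's arithmetic: it returns a genesis
// time such that time.Now() lands slot*secondsPerSlot-delay seconds after genesis.
func shiftedGenesis(slot, delay int64) time.Time {
	offset := slot*secondsPerSlot - delay
	return time.Unix(time.Now().Unix()-offset, 0)
}

func main() {
	g := shiftedGenesis(3, 0)
	fmt.Println(time.Now().Unix() - g.Unix()) // 36: exactly at the start of slot 3
}
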
@@ -7,8 +7,6 @@ import (
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -67,20 +65,13 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
}

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnProcessAttestationsRoutine failed to receive genesis data")
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}

if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {

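The rewrite above swaps an event-feed subscription for a single blocking call: the goroutine simply waits for genesis data before doing any work. A minimal sketch of that gating pattern, with a hypothetical Waiter interface standing in for startup.ClockWaiter (whose definition sits outside this hunk):

package main

import (
	"context"
	"fmt"
	"log"
	"time"
)

// Clock stands in for startup.Clock: the genesis data services wait on.
type Clock struct{ Genesis time.Time }

// Waiter stands in for startup.ClockWaiter.
type Waiter interface {
	WaitForClock(ctx context.Context) (*Clock, error)
}

// spawn mirrors the shape of spawnProcessAttestationsRoutine after this change:
// block until genesis is known (or the context dies), then run the worker.
func spawn(ctx context.Context, w Waiter, work func(*Clock)) {
	go func() {
		c, err := w.WaitForClock(ctx)
		if err != nil {
			log.Printf("failed to receive genesis data: %v", err)
			return
		}
		work(c)
	}()
}

// readyWaiter is a trivial Waiter whose clock is already available.
type readyWaiter struct{ c *Clock }

func (r readyWaiter) WaitForClock(_ context.Context) (*Clock, error) { return r.c, nil }

func main() {
	done := make(chan struct{})
	w := readyWaiter{c: &Clock{Genesis: time.Unix(23, 0)}}
	spawn(context.Background(), w, func(c *Clock) {
		fmt.Println("genesis:", c.Genesis.Unix())
		close(done)
	})
	<-done
}
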
@@ -7,11 +7,7 @@ import (

"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -31,22 +27,18 @@ var (

func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
helpers.ClearCache()
beaconDB := testDB.SetupDB(t)
service, _ := minimalTestService(t)

chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
service.genesisTime = time.Now()

e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
_, err := service.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}

func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -69,11 +61,8 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
}

func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()

opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
ctx := tr.ctx

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
@@ -96,13 +85,10 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
}

func TestProcessAttestations_Ok(t *testing.T) {
service, tr := minimalTestService(t)
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()))
ctx := tr.ctx

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
@@ -126,21 +112,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
}

func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(newStateGen),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
}
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
@@ -189,21 +163,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
}

func TestService_UpdateHead_NoAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
newStateGen := stategen.New(beaconDB, fcs)
fcs.SetBalancesByRooter(newStateGen.ActiveNonSlashedBalancesByRoot)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithStateGen(newStateGen),
WithForkChoiceStore(fcs),
}
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))

@@ -7,12 +7,7 @@ import (
"time"

blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -125,22 +120,15 @@ func TestService_ReceiveBlock(t *testing.T) {
for _, tt := range tests {
wg.Add(1)
t.Run(tt.name, func(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s, tr := minimalTestService(t,
WithFinalizedStateAtStartUp(genesis),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))

beaconDB := tr.db
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))

fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
WithFinalizedStateAtStartUp(genesis),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -162,25 +150,16 @@ func TestService_ReceiveBlock(t *testing.T) {
}

func TestService_ReceiveBlockUpdateHead(t *testing.T) {
ctx := context.Background()
s, tr := minimalTestService(t,
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
ctx, beaconDB := tr.ctx, tr.db
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
fc := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
}

s, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize it here.
_ = s.cfg.StateNotifier.StateFeed()
require.NoError(t, s.saveGenesisData(ctx, genesis))
@@ -246,17 +225,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fc := doublylinkedtree.New()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB, fc)),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
err := s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
@@ -276,10 +246,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}

func TestService_HasBlock(t *testing.T) {
opts := testServiceOptsWithDB(t)
opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
r := [32]byte{'a'}
if s.HasBlock(context.Background(), r) {
t.Error("Should not have block")
@@ -299,10 +266,8 @@ func TestService_HasBlock(t *testing.T) {
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

@@ -312,9 +277,9 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {

func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)

s, _ := minimalTestService(t)

st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -326,9 +291,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s, _ := minimalTestService(t)
s.genesisTime = time.Now()

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
@@ -336,19 +299,8 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
}

func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
pool := blstoexec.NewPool()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&blockchainTesting.MockStateNotifier{}),
WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service, tr := minimalTestService(t)
pool := tr.blsPool

t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}

@@ -27,6 +27,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -57,6 +58,8 @@ type Service struct {
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
}

// config options for the service.
@@ -83,6 +86,8 @@ type config struct {
ExecutionEngineCaller execution.EngineCaller
}

var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")

// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
@@ -100,6 +105,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return nil, err
}
}
if srv.clockSetter == nil {
return nil, ErrMissingClockSetter
}
var err error
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
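NewService now fails fast when no clock setter was supplied. WithClockSynchronizer itself is not shown in this diff; judging from its call sites (setup_test.go below and node.go), a plausible shape, assuming the package's usual Option signature, is:

// Sketch only; the real option lives elsewhere in this package and may differ.
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
	return func(s *Service) error {
		// One synchronizer fills both roles: the blockchain service sets
		// the clock, every other service waits on it.
		s.clockSetter = gs
		s.clockWaiter = gs
		return nil
	}
}
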
@@ -121,8 +129,8 @@ func (s *Service) Start() {
log.Fatal(err)
}
}
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
s.spawnProcessAttestationsRoutine()
s.spawnLateBlockTasksLoop()
}

// Stop the blockchain service's main event loop and associated goroutines.
@@ -236,13 +244,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
}

s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
vr := bytesutil.ToBytes32(saved.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}

return nil
}
@@ -359,15 +364,10 @@ func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Ti
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)

// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
},
})
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("failed to initialize blockchain service from execution start event")
}
}

// initializes the state and genesis block of the beacon chain to persistent storage

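Both paths above now converge on startup.NewClock rather than broadcasting a statefeed.Initialized event. The startup package is outside this excerpt; inferred from the call sites here and the commit notes ("use time.Now when no Nower is specified"), the Clock contract is roughly:

// Inferred sketch of startup.Clock; the real type also carries more (e.g. slot helpers).
type Nower func() time.Time

type Clock struct {
	t   time.Time // genesis time
	vr  [32]byte  // genesis validators root
	now Nower
}

func NewClock(t time.Time, vr [32]byte) *Clock {
	return &Clock{t: t, vr: vr, now: time.Now} // time.Now when no Nower is given
}

func (c *Clock) GenesisTime() time.Time          { return c.t }
func (c *Clock) GenesisValidatorsRoot() [32]byte { return c.vr }
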
@@ -8,13 +8,9 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v4/async/event"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
@@ -25,7 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -40,45 +36,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)

type mockBeaconNode struct {
stateFeed *event.Feed
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}

type mockBroadcaster struct {
broadcastCalled bool
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
ctx := context.Background()
var web3Service *execution.Service
@@ -141,6 +100,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationService(attService),
WithStateGen(stateGen),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
WithClockSynchronizer(startup.NewClockSynchronizer()),
}

chainService, err := NewService(ctx, opts...)
@@ -157,12 +117,14 @@ func TestChainStartStop_Initialized(t *testing.T) {

chainService := setupBeaconChain(t, beaconDB)

gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -192,12 +154,14 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {

chainService := setupBeaconChain(t, beaconDB)

gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
@@ -264,12 +228,14 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {

chainService := setupBeaconChain(t, beaconDB)

gt := time.Unix(23, 0)
genesisBlk := util.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetGenesisTime(uint64(gt.Unix())))
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -290,14 +256,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
}

func TestChainService_InitializeChainInfo(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -309,23 +270,18 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)

c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))

require.NoError(t, c.StartFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
@@ -345,14 +301,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
}

func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
@@ -364,27 +315,21 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))

c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB := tr.ctx, tr.db

util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
ss := &ethpb.StateSummary{
Slot: finalizedSlot,
Root: headRoot[:],
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, ss))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: headRoot[:], Epoch: slots.ToEpoch(finalizedSlot)}))
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)

require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
@@ -460,17 +405,21 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
}

func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
stateChannel := make(chan *feed.Event, 1)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.onExecutionChainStart(context.Background(), time.Now())

stateEvent := <-stateChannel
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
_, ok := stateEvent.Data.(*statefeed.InitializedData)
require.Equal(t, true, ok)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(context.Background(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
}

func BenchmarkHasBlockDB(b *testing.B) {
@@ -519,15 +468,10 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
EnableStartOptimistic: true,
})
defer resetFn()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
@@ -538,21 +482,17 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)

c, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
c, err := NewService(ctx,
WithForkChoiceStore(fc),
WithDatabase(beaconDB),
WithStateGen(stateGen),
WithAttestationService(attSrv),
WithStateNotifier(&mock.MockStateNotifier{}),
WithFinalizedStateAtStartUp(headState))

require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
@@ -562,3 +502,19 @@ func TestChainService_EverythingOptimistic(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, op)
}

// MockClockSetter satisfies the ClockSetter interface for testing the conditions where blockchain.Service should
// call SetClock.
type MockClockSetter struct {
G *startup.Clock
Err error
}

var _ startup.ClockSetter = &MockClockSetter{}

// SetClock satisfies the ClockSetter interface.
// The value is written to an exported field 'G' so that it can be accessed in tests.
func (s *MockClockSetter) SetClock(g *startup.Clock) error {
s.G = g
return s.Err
}

beacon-chain/blockchain/setup_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package blockchain

import (
"context"
"testing"

"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)

type mockBeaconNode struct {
stateFeed *event.Feed
}

// StateFeed mocks the same method in the beacon node.
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
if mbn.stateFeed == nil {
mbn.stateFeed = new(event.Feed)
}
return mbn.stateFeed
}

type mockBroadcaster struct {
broadcastCalled bool
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

type testServiceRequirements struct {
ctx context.Context
db db.Database
fcs forkchoice.ForkChoicer
sg *stategen.State
notif statefeed.Notifier
cs *startup.ClockSynchronizer
attPool attestations.Pool
attSrv *attestations.Service
blsPool *blstoexec.Pool
dc *depositcache.DepositCache
}

func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
sg := stategen.New(beaconDB, fcs)
notif := &mockBeaconNode{}
fcs.SetBalancesByRooter(sg.ActiveNonSlashedBalancesByRoot)
cs := startup.NewClockSynchronizer()
attPool := attestations.NewPool()
attSrv, err := attestations.NewService(ctx, &attestations.Config{Pool: attPool})
require.NoError(t, err)
blsPool := blstoexec.NewPool()
dc, err := depositcache.New()
require.NoError(t, err)
req := &testServiceRequirements{
ctx: ctx,
db: beaconDB,
fcs: fcs,
sg: sg,
notif: notif,
cs: cs,
attPool: attPool,
attSrv: attSrv,
blsPool: blsPool,
dc: dc,
}
defOpts := []Option{WithDatabase(req.db),
WithStateNotifier(req.notif),
WithStateGen(req.sg),
WithForkChoiceStore(req.fcs),
WithClockSynchronizer(req.cs),
WithAttestationPool(req.attPool),
WithAttestationService(req.attSrv),
WithBLSToExecPool(req.blsPool),
WithDepositCache(dc),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)
s, err := NewService(req.ctx, opts...)
require.NoError(t, err)
return s, req
}

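Because the caller's opts are appended after defOpts, a later option overrides its default counterpart. A hypothetical test using the helper (not part of this diff) would look like:

func TestWithCustomEngine(t *testing.T) { // hypothetical example
	mockEngine := &mockExecution.EngineClient{}
	service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
	// The override wins because the variadic opts are processed last.
	_ = service
	_ = tr.db // the requirements struct exposes the shared fixtures for assertions
}
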
@@ -15,10 +15,10 @@ const (
BlockProcessed = iota + 1
// ChainStarted is sent when enough validators are active to start proposing blocks.
ChainStarted
// Initialized is sent when the internal beacon node's state is ready to be accessed.
Initialized
// Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
Synced
// deprecated: Initialized is sent when the internal beacon node's state is ready to be accessed.
_
// deprecated: Synced is sent when the beacon node has completed syncing and is ready to participate in the network.
_
// Reorg is an event sent when the new head is not a descendant of the previous head.
Reorg
// FinalizedCheckpoint event.

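Replacing the removed feed constants with blank identifiers rather than deleting them keeps the iota numbering of the surviving constants stable, so Reorg and later events keep their existing values. A standalone illustration of the technique:

package main

import "fmt"

const (
	BlockProcessed = iota + 1 // 1
	ChainStarted              // 2
	_                         // 3, was Initialized
	_                         // 4, was Synced
	Reorg                     // 5, unchanged by the removal
)

func main() { fmt.Println(BlockProcessed, ChainStarted, Reorg) } // 1 2 5
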
@@ -48,6 +48,7 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"main_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",

beacon-chain/core/helpers/main_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package helpers

import (
"os"
"testing"
)

// Run ClearCache once before the package's tests to prevent cross-test side effects.
func TestMain(m *testing.M) {
ClearCache()
code := m.Run()
os.Exit(code)
}
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"testing"
"time"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -241,10 +240,44 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {

func TestLogAggregatedPerformance(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
latestPerformance := map[primitives.ValidatorIndex]ValidatorLatestPerformance{
1: {
balance: 32000000000,
},
2: {
balance: 32000000000,
},
12: {
balance: 31900000000,
},
15: {
balance: 31900000000,
},
}
aggregatedPerformance := map[primitives.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
startEpoch: 0,
startBalance: 31700000000,
totalAttestedCount: 12,
totalRequestedCount: 15,
totalDistance: 14,
totalCorrectHead: 8,
totalCorrectSource: 11,
totalCorrectTarget: 12,
totalProposedCount: 1,
totalSyncCommitteeContributions: 0,
totalSyncCommitteeAggregations: 0,
},
2: {},
12: {},
15: {},
}
s := &Service{
latestPerformance: latestPerformance,
aggregatedPerformance: aggregatedPerformance,
}

s.logAggregatedPerformance()
time.Sleep(3000 * time.Millisecond)
wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" +
" AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " +
"CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " +

@@ -19,13 +19,8 @@ import (
"github.com/sirupsen/logrus"
)

var (
// Error when event feed data is not statefeed.SyncedData.
errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData")

// Error when the context is closed while waiting for sync.
errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head")
)
// Error when the context is closed while waiting for sync.
var errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head")

// ValidatorLatestPerformance keeps track of the latest participation of the validator
type ValidatorLatestPerformance struct {
@@ -63,6 +58,7 @@ type ValidatorMonitorConfig struct {
AttestationNotifier operation.Notifier
HeadFetcher blockchain.HeadFetcher
StateGen stategen.StateManager
InitialSyncComplete chan struct{}
}

// Service is the main structure that tracks validators and reports logs and
@@ -131,7 +127,7 @@ func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription
return
}

if err := s.waitForSync(stateChannel, stateSub); err != nil {
if err := s.waitForSync(s.config.InitialSyncComplete); err != nil {
log.WithError(err)
return
}
@@ -197,24 +193,13 @@ func (s *Service) Stop() error {
}

// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error {
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.Synced {
_, ok := e.Data.(*statefeed.SyncedData)
if !ok {
return errNotSyncedData
}
return nil
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return errContextClosedWhileWaiting
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
return err
}
func (s *Service) waitForSync(syncChan chan struct{}) error {
select {
case <-syncChan:
return nil
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return errContextClosedWhileWaiting
}
}

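The new waitForSync leans on a basic Go property: a closed channel is readable by any number of goroutines, so closing InitialSyncComplete once releases every waiter, with no feed subscription to manage. A minimal demonstration of that broadcast:

package main

import (
	"fmt"
	"sync"
)

func main() {
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-done // blocks until the channel is closed
			fmt.Println("waiter", id, "released")
		}(i)
	}
	close(done) // one close wakes all three waiters
	wg.Wait()
}
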
@@ -93,6 +93,7 @@ func setupService(t *testing.T) *Service {
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
AttestationNotifier: chainService.OperationNotifier(),
InitialSyncComplete: make(chan struct{}),
},

ctx: context.Background(),
@@ -140,34 +141,9 @@ func TestNewService(t *testing.T) {
func TestStart(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()

wg := &sync.WaitGroup{}
wg.Add(1)
s.Start()

go func() {
select {
case stateEvent := <-stateChannel:
if stateEvent.Type == statefeed.Synced {
_, ok := stateEvent.Data.(*statefeed.SyncedData)
require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
}
case <-s.ctx.Done():
}
wg.Done()
}()

for sent := 0; sent == 0; {
sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
})
}
close(s.config.InitialSyncComplete)

// wait for Logrus
time.Sleep(1000 * time.Millisecond)
@@ -267,26 +243,29 @@ func TestMonitorRoutine(t *testing.T) {
}

func TestWaitForSync(t *testing.T) {
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()

wg := &sync.WaitGroup{}
wg.Add(1)
ctx, cancel := context.WithCancel(context.Background())
s := &Service{ctx: ctx}
syncChan := make(chan struct{})

go func() {
err := s.waitForSync(stateChannel, stateSub)
require.NoError(t, err)
wg.Done()
// Failsafe to make sure tests never get deadlocked; we should always go through the happy path before 500ms.
// Otherwise, the NoError assertion below will fail.
time.Sleep(500 * time.Millisecond)
cancel()
}()
go func() {
close(syncChan)
}()
require.NoError(t, s.waitForSync(syncChan))
}

stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
func TestWaitForSyncCanceled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
s := &Service{ctx: ctx}
syncChan := make(chan struct{})

cancel()
require.ErrorIs(t, s.waitForSync(syncChan), errContextClosedWhileWaiting)
}

func TestRun(t *testing.T) {
@@ -295,21 +274,11 @@ func TestRun(t *testing.T) {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

wg := &sync.WaitGroup{}
wg.Add(1)

go func() {
s.run(stateChannel, stateSub)
wg.Done()
}()
close(s.config.InitialSyncComplete)

stateChannel <- &feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
}
// wait for Logrus
time.Sleep(1000 * time.Millisecond)
time.Sleep(100 * time.Millisecond)
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
}

@@ -40,6 +40,7 @@ go_library(
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",

@@ -42,6 +42,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
regularsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
@@ -107,6 +108,8 @@ type BeaconNode struct {
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
}

// New creates a new node instance, sets up configuration options, and registers
@@ -177,12 +180,16 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
proposerIdsCache: cache.NewProposerPayloadIDsCache(),
}

beacon.initialSyncComplete = make(chan struct{})
for _, opt := range opts {
if err := opt(beacon); err != nil {
return nil, err
}
}

synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer

beacon.forkChoicer = doublylinkedtree.New()
depositAddress, err := execution.DepositContractAddress()
if err != nil {
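Note the fan-out here: the node builds one ClockSynchronizer, hands it to the blockchain service as the clock setter (registerBlockchainService below), and keeps it as b.clockWaiter for every consumer registered afterwards. A hedged sketch of how such a synchronizer could be built on channel close (the real startup.ClockSynchronizer may differ internally):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type Clock struct{ Genesis time.Time }

// ClockSynchronizer: one producer calls SetClock, any number of consumers
// block in WaitForClock. Assumes a single producer, as in the node wiring.
type ClockSynchronizer struct {
	c     *Clock
	ready chan struct{}
}

func NewClockSynchronizer() *ClockSynchronizer {
	return &ClockSynchronizer{ready: make(chan struct{})}
}

func (s *ClockSynchronizer) SetClock(c *Clock) error {
	select {
	case <-s.ready:
		return errors.New("clock already set")
	default:
		s.c = c
		close(s.ready) // releases every waiter at once
		return nil
	}
}

func (s *ClockSynchronizer) WaitForClock(ctx context.Context) (*Clock, error) {
	select {
	case <-s.ready:
		return s.c, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	cs := NewClockSynchronizer()
	go func() { _ = cs.SetClock(&Clock{Genesis: time.Unix(23, 0)}) }()
	c, err := cs.WaitForClock(context.Background())
	fmt.Println(c.Genesis.Unix(), err) // 23 <nil>
}
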
@@ -229,17 +236,17 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
}
|
||||
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
if err := beacon.registerBlockchainService(beacon.forkChoicer); err != nil {
|
||||
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Initial Sync Service")
|
||||
if err := beacon.registerInitialSyncService(); err != nil {
|
||||
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Sync Service")
|
||||
if err := beacon.registerSyncService(); err != nil {
|
||||
if err := beacon.registerSyncService(beacon.initialSyncComplete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -265,7 +272,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
}
|
||||
|
||||
log.Debugln("Registering Validator Monitoring Service")
|
||||
if err := beacon.registerValidatorMonitorService(); err != nil {
|
||||
if err := beacon.registerValidatorMonitorService(beacon.initialSyncComplete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -548,6 +555,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -581,7 +589,7 @@ func (b *BeaconNode) registerAttestationPool() error {
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer) error {
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer) error {
var web3Service *execution.Service
if err := b.services.FetchService(&web3Service); err != nil {
return err
@@ -611,6 +619,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer) error
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
blockchain.WithClockSynchronizer(gs),
)

blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -652,7 +661,7 @@ func (b *BeaconNode) registerPOWChainService() error {
return b.services.RegisterService(web3Service)
}

func (b *BeaconNode) registerSyncService() error {
func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) error {
var web3Service *execution.Service
if err := b.services.FetchService(&web3Service); err != nil {
return err
@@ -674,7 +683,6 @@ func (b *BeaconNode) registerSyncService() error {
regularsync.WithP2P(b.fetchP2P()),
regularsync.WithChainService(chainService),
regularsync.WithInitialSync(initSync),
regularsync.WithStateNotifier(b),
regularsync.WithBlockNotifier(b),
regularsync.WithAttestationNotifier(b),
regularsync.WithOperationNotifier(b),
@@ -687,22 +695,26 @@ func (b *BeaconNode) registerSyncService() error {
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
regularsync.WithExecutionPayloadReconstructor(web3Service),
regularsync.WithClockWaiter(b.clockWaiter),
regularsync.WithInitialSyncComplete(initialSyncComplete),
)
return b.services.RegisterService(rs)
}

func (b *BeaconNode) registerInitialSyncService() error {
func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
var chainService *blockchain.Service
if err := b.services.FetchService(&chainService); err != nil {
return err
}

is := initialsync.NewService(b.ctx, &initialsync.Config{
DB: b.db,
Chain: chainService,
P2P: b.fetchP2P(),
StateNotifier: b,
BlockNotifier: b,
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
})
return b.services.RegisterService(is)
}
@@ -834,6 +846,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
ProposerIdsCache: b.proposerIdsCache,
BlockBuilder: b.fetchBuilderService(),
Router: router,
ClockWaiter: b.clockWaiter,
})

return b.services.RegisterService(rpcService)
@@ -934,7 +947,7 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
return nil
}

func (b *BeaconNode) registerValidatorMonitorService() error {
func (b *BeaconNode) registerValidatorMonitorService(initialSyncComplete chan struct{}) error {
cliSlice := b.cliCtx.IntSlice(cmd.ValidatorMonitorIndicesFlag.Name)
if cliSlice == nil {
return nil
@@ -953,6 +966,7 @@ func (b *BeaconNode) registerValidatorMonitorService() error {
AttestationNotifier: b,
StateGen: b.stateGen,
HeadFetcher: chainService,
InitialSyncComplete: initialSyncComplete,
}
svc, err := monitor.NewService(b.ctx, monitorConfig, tracked)
if err != nil {

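A note on the plumbing above: initialSyncComplete is a one-shot gate. Initial sync closes the channel exactly once, every service holding it unblocks simultaneously, and a closed channel never blocks later readers. A minimal, self-contained sketch of the consumer side (gateOnInitialSync is a hypothetical name, not part of this diff):

package example

import "context"

// gateOnInitialSync blocks until the producer closes initialSyncComplete,
// or until the context is canceled, mirroring how consumers of the channel
// in this diff are expected to use it.
func gateOnInitialSync(ctx context.Context, initialSyncComplete chan struct{}) error {
	select {
	case <-initialSyncComplete:
		return nil // initial sync finished; safe to start slot-driven work
	case <-ctx.Done():
		return ctx.Err()
	}
}
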
@@ -164,7 +164,7 @@ func TestMonitor_RegisteredCorrectly(t *testing.T) {
require.NoError(t, cliCtx.Set(cmd.ValidatorMonitorIndicesFlag.Name, "1,2"))
n := &BeaconNode{ctx: context.Background(), cliCtx: cliCtx, services: runtime.NewServiceRegistry()}
require.NoError(t, n.services.RegisterService(&blockchain.Service{}))
require.NoError(t, n.registerValidatorMonitorService())
require.NoError(t, n.registerValidatorMonitorService(make(chan struct{})))

var mService *monitor.Service
require.NoError(t, n.services.FetchService(&mService))

@@ -44,7 +44,6 @@ go_library(
"//async:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
@@ -54,6 +53,7 @@ go_library(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -132,11 +132,8 @@ go_test(
flaky = True,
tags = ["requires-network"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
@@ -146,6 +143,7 @@ go_test(
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

@@ -3,6 +3,7 @@ package p2p
import (
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
)

// Config for the p2p service. These parameters are set from application level flags
@@ -28,4 +29,5 @@ type Config struct {
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
}

@@ -22,12 +22,11 @@ import (
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
testp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
@@ -169,8 +168,10 @@ func TestMultiAddrConversion_OK(t *testing.T) {
}

func TestStaticPeering_PeersAreAdded(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
MaxPeers: 30,
ClockWaiter: cs,
}
port := 6000
var staticPeers []string
@@ -204,16 +205,8 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
<-exitRoutine
}()
time.Sleep(50 * time.Millisecond)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
GenesisValidatorsRoot: make([]byte, 32),
},
})
}
var vr [32]byte
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
time.Sleep(4 * time.Second)
ps := s.host.Network().Peers()
assert.Equal(t, 5, len(ps), "Not all peers added to peerstore")

@@ -125,7 +125,6 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
cfg: &Config{UDPPort: uint(port)},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
stateNotifier: &mock.MockStateNotifier{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -155,7 +154,6 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
stateNotifier: &mock.MockStateNotifier{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")

@@ -9,10 +9,8 @@ import (

pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
@@ -337,28 +335,16 @@ func TestService_MonitorsStateForkUpdates(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
notifier := &mock.MockStateNotifier{}
s, err := NewService(ctx, &Config{
StateNotifier: notifier,
})
cs := startup.NewClockSynchronizer()
s, err := NewService(ctx, &Config{ClockWaiter: cs})
require.NoError(t, err)

require.Equal(t, false, s.isInitialized())

go s.awaitStateInitialized()

for n := 0; n == 0; {
if ctx.Err() != nil {
t.Fatal(ctx.Err())
}
n = notifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: prysmTime.Now(),
GenesisValidatorsRoot: bytesutil.PadTo([]byte("genesis"), 32),
},
})
}
vr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis"), 32))
require.NoError(t, cs.SetClock(startup.NewClock(prysmTime.Now(), vr)))

time.Sleep(50 * time.Millisecond)

@@ -11,20 +11,23 @@ import (
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
testp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) {
cs := startup.NewClockSynchronizer()
s, err := NewService(context.Background(), &Config{
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: cs,
})
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()

go s.awaitStateInitialized()
fd := initializeStateWithForkDigest(ctx, t, s.stateNotifier.StateFeed())
fd := initializeStateWithForkDigest(ctx, t, cs)

if !s.isInitialized() {
t.Fatal("service was not initialized")

@@ -20,8 +20,6 @@ import (
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
@@ -77,7 +75,6 @@ type Service struct {
initializationLock sync.Mutex
dv5Listener Listener
startupErr error
stateNotifier statefeed.Notifier
ctx context.Context
host host.Host
genesisTime time.Time
@@ -93,13 +90,12 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().

s := &Service{
ctx: ctx,
stateNotifier: cfg.StateNotifier,
cancel: cancel,
cfg: cfg,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
}

dv5Nodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr)
@@ -383,38 +379,19 @@ func (s *Service) pingPeers() {
func (s *Service) awaitStateInitialized() {
s.initializationLock.Lock()
defer s.initializationLock.Unlock()

if s.isInitialized() {
return
}

stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
cleanup := stateSub.Unsubscribe
defer cleanup()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.Initialized {
data, ok := event.Data.(*statefeed.InitializedData)
if !ok {
// log.Fatalf will prevent defer from being called
cleanup()
log.Fatalf("Received wrong data over state initialized feed: %v", data)
}
s.genesisTime = data.StartTime
s.genesisValidatorsRoot = data.GenesisValidatorsRoot
_, err := s.currentForkDigest() // initialize fork digest cache
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}

return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
}
clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Fatal("failed to receive initial genesis data")
}
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()
s.genesisValidatorsRoot = gvr[:]
_, err = s.currentForkDigest() // initialize fork digest cache
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}
}

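The rewritten awaitStateInitialized above is the shape every waiter in this change follows: hold a startup.ClockWaiter, block once at startup, then read genesis data off the returned *Clock. A minimal sketch of such a consumer, assuming only the startup package API introduced in this diff (myService is a hypothetical type used for illustration):

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
)

// myService is a hypothetical waiter-side service.
type myService struct {
	cw startup.ClockWaiter
}

func (s *myService) run(ctx context.Context) error {
	// Blocks until the blockchain service calls SetClock, or ctx ends.
	clock, err := s.cw.WaitForClock(ctx)
	if err != nil {
		return err // typically context.Canceled during shutdown
	}
	genesisTime := clock.GenesisTime()
	root := clock.GenesisValidatorsRoot() // [32]byte, copy as needed
	_, _ = genesisTime, root
	return nil
}
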
@@ -15,13 +15,11 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/v4/async/event"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
@@ -102,30 +100,22 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()

cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: cs,
}
s, err := NewService(context.Background(), cfg)
require.NoError(t, err)
s.stateNotifier = &mock.MockStateNotifier{}
s.dv5Listener = &mockListener{}
exitRoutine := make(chan bool)
go func() {
s.Start()
<-exitRoutine
}()
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
GenesisValidatorsRoot: make([]byte, 32),
},
})
}
var vr [32]byte
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
time.Sleep(time.Second * 2)
assert.Equal(t, true, s.started, "Expected service to be started")
s.Start()
@@ -155,17 +145,17 @@ func TestService_Status_NoGenesisTimeSet(t *testing.T) {
func TestService_Start_NoDiscoverFlag(t *testing.T) {
params.SetupTestConfigCleanup(t)

cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
StateNotifier: &mock.MockStateNotifier{},
NoDiscovery: true, // <-- no s.dv5Listener is created
ClockWaiter: cs,
}
s, err := NewService(context.Background(), cfg)
require.NoError(t, err)

s.stateNotifier = &mock.MockStateNotifier{}

// required params to addForkEntry in s.forkWatcher
s.genesisTime = time.Now()
beaconCfg := params.BeaconConfig().Copy()
@@ -181,16 +171,8 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
<-exitRoutine
}()

// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
GenesisValidatorsRoot: make([]byte, 32),
},
})
}
var vr [32]byte
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))

time.Sleep(time.Second * 2)

@@ -207,11 +189,11 @@ func TestListenForNewNodes(t *testing.T) {
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
genesisTime := prysmTime.Now()
genesisValidatorsRoot := make([]byte, 32)
var gvr [32]byte
s := &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
genesisValidatorsRoot: gvr[:],
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -229,11 +211,12 @@ func TestListenForNewNodes(t *testing.T) {
var listeners []*discover.UDPv5
var hosts []host.Host
// setup other nodes.
cs := startup.NewClockSynchronizer()
cfg = &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
StateNotifier: notifier,
ClockWaiter: cs,
}
for i := 1; i <= 5; i++ {
h, pkey, ipAddr := createHost(t, port+i)
@@ -242,7 +225,7 @@ func TestListenForNewNodes(t *testing.T) {
s := &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
genesisValidatorsRoot: gvr[:],
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
@@ -276,16 +259,9 @@ func TestListenForNewNodes(t *testing.T) {
<-exitRoutine
}()
time.Sleep(1 * time.Second)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: genesisValidatorsRoot,
},
})
}

require.NoError(t, cs.SetClock(startup.NewClock(genesisTime, gvr)))

time.Sleep(4 * time.Second)
assert.Equal(t, 5, len(s.host.Network().Peers()), "Not all peers added to peerstore")
require.NoError(t, s.Stop())
@@ -327,11 +303,12 @@ func TestService_JoinLeaveTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}})
gs := startup.NewClockSynchronizer()
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs})
require.NoError(t, err)

go s.awaitStateInitialized()
fd := initializeStateWithForkDigest(ctx, t, s.stateNotifier.StateFeed())
fd := initializeStateWithForkDigest(ctx, t, gs)

assert.Equal(t, 0, len(s.joinedTopics))

@@ -358,23 +335,12 @@ func TestService_JoinLeaveTopic(t *testing.T) {

// initializeStateWithForkDigest sets up the state feed initialized event and returns the fork
// digest associated with that genesis event.
func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.Feed) [4]byte {
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.PadTo([]byte("genesis validators root"), 32)
for n := 0; n == 0; {
if ctx.Err() != nil {
t.Fatal(ctx.Err())
}
n = ef.Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: gt,
GenesisValidatorsRoot: gvr,
},
})
}
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))

fd, err := forks.CreateForkDigest(gt, gvr)
fd, err := forks.CreateForkDigest(gt, gvr[:])
require.NoError(t, err)

time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.

@@ -12,10 +12,8 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
@@ -88,15 +86,17 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {

// Make one service on port 4001.
port = 4001
gs := startup.NewClockSynchronizer()
cfg := &Config{
BootstrapNodeAddr: []string{bootNode.String()},
Discv5BootStrapAddr: []string{bootNode.String()},
MaxPeers: 30,
UDPPort: uint(port),
ClockWaiter: gs,
}
cfg.StateNotifier = &mock.MockStateNotifier{}
s, err = NewService(context.Background(), cfg)
require.NoError(t, err)

exitRoutine := make(chan bool)
go func() {
s.Start()
@@ -104,15 +104,8 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
}()
time.Sleep(50 * time.Millisecond)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
GenesisValidatorsRoot: make([]byte, 32),
},
})
}
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))

// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(6 * discoveryWaitTime)

@@ -36,6 +36,7 @@ go_library(
"//beacon-chain/rpc/prysm/v1alpha1/node:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",

@@ -55,6 +55,7 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",

@@ -7,12 +7,10 @@ import (
"context"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -25,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -74,6 +73,7 @@ type Server struct {
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
ClockWaiter startup.ClockWaiter
}

// WaitForActivation checks if a validator public key exists in the active validator registry of the current
@@ -170,30 +170,17 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
return stream.Send(res)
}

stateChannel := make(chan *feed.Event, 1)
stateSub := vs.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.Initialized {
data, ok := event.Data.(*statefeed.InitializedData)
if !ok {
return errors.New("event data is not type *statefeed.InitializedData")
}
log.WithField("starttime", data.StartTime).Debug("Received chain started event")
log.Debug("Sending genesis time notification to connected validator clients")
res := &ethpb.ChainStartResponse{
Started: true,
GenesisTime: uint64(data.StartTime.Unix()),
GenesisValidatorsRoot: data.GenesisValidatorsRoot,
}
return stream.Send(res)
}
case <-stateSub.Err():
return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine")
case <-vs.Ctx.Done():
return status.Error(codes.Canceled, "Context canceled")
}
clock, err := vs.ClockWaiter.WaitForClock(vs.Ctx)
if err != nil {
return status.Error(codes.Canceled, "Context canceled")
}
log.WithField("starttime", clock.GenesisTime()).Debug("Received chain started event")
log.Debug("Sending genesis time notification to connected validator clients")
gvr := clock.GenesisValidatorsRoot()
res := &ethpb.ChainStartResponse{
Started: true,
GenesisTime: uint64(clock.GenesisTime().Unix()),
GenesisValidatorsRoot: gvr[:],
}
return stream.Send(res)
}

@@ -10,9 +10,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/async/event"
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -189,13 +188,14 @@ func TestWaitForActivation_MultipleStatuses(t *testing.T) {
func TestWaitForChainStart_ContextClosed(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
chainService := &mockChain.ChainService{}
Server := &Server{
server := &Server{
Ctx: ctx,
ChainStartFetcher: &mockExecution.FaultyExecutionChain{
ChainFeed: new(event.Feed),
},
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
ClockWaiter: startup.NewClockSynchronizer(),
}

exitRoutine := make(chan bool)
@@ -204,7 +204,7 @@ func TestWaitForChainStart_ContextClosed(t *testing.T) {
mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl)
mockStream.EXPECT().Context().Return(ctx)
go func(tt *testing.T) {
err := Server.WaitForChainStart(&emptypb.Empty{}, mockStream)
err := server.WaitForChainStart(&emptypb.Empty{}, mockStream)
assert.ErrorContains(tt, "Context canceled", err)
<-exitRoutine
}(t)
@@ -243,11 +243,9 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
}

func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
genesisValidatorsRoot := params.BeaconConfig().ZeroHash

// Set head state to nil
chainService := &mockChain.ChainService{State: nil}
notifier := chainService.StateNotifier()
gs := startup.NewClockSynchronizer()
Server := &Server{
Ctx: context.Background(),
ChainStartFetcher: &mockExecution.Chain{
@@ -255,6 +253,7 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
},
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
ClockWaiter: gs,
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -267,15 +266,7 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
assert.NoError(t, Server.WaitForChainStart(&emptypb.Empty{}, mockStream), "Could not call RPC method")
wg.Done()
}()
// Simulate a late state initialization event, so that
// method is able to handle race condition here.
notifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Unix(0, 0),
GenesisValidatorsRoot: genesisValidatorsRoot[:],
},
})

util.WaitTimeout(wg, time.Second)
}

@@ -284,6 +275,8 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {

genesisValidatorsRoot := bytesutil.ToBytes32([]byte("validators"))
chainService := &mockChain.ChainService{}
gs := startup.NewClockSynchronizer()

Server := &Server{
Ctx: context.Background(),
ChainStartFetcher: &mockExecution.FaultyExecutionChain{
@@ -291,6 +284,7 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
},
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
ClockWaiter: gs,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
@@ -310,15 +304,7 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
}(t)

// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = Server.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Unix(0, 0),
GenesisValidatorsRoot: genesisValidatorsRoot[:],
},
})
}
require.NoError(t, gs.SetClock(startup.NewClock(time.Unix(0, 0), genesisValidatorsRoot)))

exitRoutine <- true
require.LogsContain(t, hook, "Sending genesis time")

@@ -41,6 +41,7 @@ import (
nodev1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/node"
validatorv1alpha1 "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
slasherservice "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
chainSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -120,6 +121,7 @@ type Config struct {
OptimisticModeFetcher blockchain.OptimisticModeFetcher
BlockBuilder builder.BlockBuilder
Router *mux.Router
ClockWaiter startup.ClockWaiter
}

// NewService instantiates a new RPC service instance that will
@@ -246,6 +248,7 @@ func (s *Service) Start() {
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,

@@ -26,11 +26,11 @@ go_library(
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
@@ -70,14 +70,13 @@ go_test(
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/slasher/mock:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",

@@ -11,6 +11,7 @@ import (
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -239,6 +240,7 @@ func Test_processQueuedAttestations(t *testing.T) {
HeadStateFetcher: mockChain,
AttestationStateFetcher: mockChain,
SlashingPoolInserter: &slashingsmock.PoolMock{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)
s.genesisTime = genesisTime
@@ -296,6 +298,7 @@ func Test_processQueuedAttestations_MultipleChunkIndices(t *testing.T) {
HeadStateFetcher: mockChain,
AttestationStateFetcher: mockChain,
SlashingPoolInserter: &slashingsmock.PoolMock{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)
s.genesisTime = genesisTime
@@ -361,6 +364,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) {
HeadStateFetcher: mockChain,
AttestationStateFetcher: mockChain,
SlashingPoolInserter: &slashingsmock.PoolMock{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)
s.genesisTime = genesisTime
@@ -475,6 +479,7 @@ func Test_applyAttestationForValidator_MinSpanChunk(t *testing.T) {
&ServiceConfig{
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)

@@ -535,6 +540,7 @@ func Test_applyAttestationForValidator_MaxSpanChunk(t *testing.T) {
&ServiceConfig{
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)

@@ -602,6 +608,7 @@ func Test_checkDoubleVotes_SlashableInputAttestations(t *testing.T) {
&ServiceConfig{
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)

@@ -638,6 +645,7 @@ func Test_checkDoubleVotes_SlashableAttestationsOnDisk(t *testing.T) {
&ServiceConfig{
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)

@@ -683,6 +691,7 @@ func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) {
&ServiceConfig{
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)

@@ -769,6 +778,7 @@ func TestService_processQueuedAttestations(t *testing.T) {
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
HeadStateFetcher: mockChain,
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(t, err)

@@ -805,6 +815,7 @@ func BenchmarkCheckSlashableAttestations(b *testing.B) {
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
HeadStateFetcher: mockChain,
ClockWaiter: startup.NewClockSynchronizer(),
})
require.NoError(b, err)

@@ -10,6 +10,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -63,6 +64,7 @@ func Test_processQueuedBlocks_DetectsDoubleProposals(t *testing.T) {
HeadStateFetcher: mockChain,
StateGen: stategen.New(beaconDB, doublylinkedtree.New()),
SlashingPoolInserter: &slashingsmock.PoolMock{},
ClockWaiter: startup.NewClockSynchronizer(),
},
params: DefaultParams(),
blksQueue: newBlocksQueue(),
@@ -129,6 +131,7 @@ func Test_processQueuedBlocks_NotSlashable(t *testing.T) {
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
HeadStateFetcher: mockChain,
ClockWaiter: startup.NewClockSynchronizer(),
},
params: DefaultParams(),
blksQueue: newBlocksQueue(),

@@ -8,6 +8,7 @@ import (
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
params2 "github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -24,6 +25,7 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) {
serviceCfg: &ServiceConfig{
IndexedAttestationsFeed: new(event.Feed),
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
},
attsQueue: newAttestationsQueue(),
}
@@ -207,6 +209,7 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) {
serviceCfg: &ServiceConfig{
IndexedAttestationsFeed: new(event.Feed),
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
},
attsQueue: newAttestationsQueue(),
}
@@ -245,6 +248,7 @@ func TestSlasher_receiveBlocks_OK(t *testing.T) {
serviceCfg: &ServiceConfig{
BeaconBlockHeadersFeed: new(event.Feed),
StateNotifier: &mock.MockStateNotifier{},
ClockWaiter: startup.NewClockSynchronizer(),
},
blksQueue: newBlocksQueue(),
}
@@ -288,6 +292,7 @@ func TestService_processQueuedBlocks(t *testing.T) {
Database: slasherDB,
StateNotifier: &mock.MockStateNotifier{},
HeadStateFetcher: mockChain,
ClockWaiter: startup.NewClockSynchronizer(),
},
blksQueue: newBlocksQueue(),
}

@@ -10,10 +10,10 @@ import (

"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -39,6 +39,7 @@ type ServiceConfig struct {
SlashingPoolInserter slashings.PoolInserter
HeadStateFetcher blockchain.HeadFetcher
SyncChecker sync.Checker
ClockWaiter startup.ClockWaiter
}

// SlashingChecker is an interface for defining services that the beacon node may interact with to provide slashing data.
@@ -167,43 +168,19 @@ func (s *Service) Stop() error {
}

// Status of the slasher service.
func (_ *Service) Status() error {
func (*Service) Status() error {
return nil
}

func (s *Service) waitForChainInitialization() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.serviceCfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case stateEvent := <-stateChannel:
// Wait for us to receive the genesis time via a chain started notification.
if stateEvent.Type == statefeed.Initialized {
// Alternatively, if the chain has already started, we then read the genesis
// time value from this data.
data, ok := stateEvent.Data.(*statefeed.InitializedData)
if !ok {
log.Error(
"Could not receive chain start notification, want *statefeed.ChainStartedData",
)
return
}
s.genesisTime = data.StartTime
log.WithField("genesisTime", s.genesisTime).Info(
"Slasher received chain initialization event",
)
return
}
case err := <-stateSub.Err():
log.WithError(err).Error(
"Slasher could not subscribe to state events",
)
return
case <-s.ctx.Done():
return
}
clock, err := s.serviceCfg.ClockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Could not receive chain start notification")
return // avoid dereferencing a nil clock when the wait is canceled
}
s.genesisTime = clock.GenesisTime()
log.WithField("genesisTime", s.genesisTime).Info(
"Slasher received chain initialization event",
)
}

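Since WaitForClock blocks until SetClock runs or the context ends, a caller that must not wait forever can derive a bounded context first. A hypothetical helper to illustrate (not part of this diff; the services here deliberately wait on their long-lived service context instead):

package example

import (
	"context"
	"time"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
)

// waitWithTimeout bounds WaitForClock with a deadline so a caller cannot
// block indefinitely waiting for genesis data.
func waitWithTimeout(parent context.Context, cw startup.ClockWaiter, d time.Duration) (*startup.Clock, error) {
	ctx, cancel := context.WithTimeout(parent, d)
	defer cancel()
	return cw.WaitForClock(ctx)
}
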
func (s *Service) waitForSync(genesisTime time.Time) {

@@ -8,10 +8,9 @@ import (

"github.com/prysmaticlabs/prysm/v4/async/event"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockslasher "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/mock"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -42,6 +41,7 @@ func TestService_StartStop_ChainInitialized(t *testing.T) {
State: beaconState,
|
}
gs := startup.NewClockSynchronizer()
srv, err := New(context.Background(), &ServiceConfig{
IndexedAttestationsFeed: new(event.Feed),
BeaconBlockHeadersFeed: new(event.Feed),
@@ -49,14 +49,13 @@ func TestService_StartStop_ChainInitialized(t *testing.T) {
Database: slasherDB,
HeadStateFetcher: mockChain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ClockWaiter: gs,
})
require.NoError(t, err)
go srv.Start()
time.Sleep(time.Millisecond * 100)
srv.serviceCfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{StartTime: time.Now()},
})
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
time.Sleep(time.Millisecond * 100)
srv.attsSlotTicker = &slots.SlotTicker{}
srv.blocksSlotTicker = &slots.SlotTicker{}

beacon-chain/startup/BUILD.bazel (new file, 31 lines)
@@ -0,0 +1,31 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"clock.go",
"synchronizer.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup",
visibility = ["//visibility:public"],
deps = [
"//consensus-types/primitives:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"clock_test.go",
"synchronizer_test.go",
],
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//testing/require:go_default_library",
],
)
beacon-chain/startup/clock.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package startup

import (
"time"

types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// Nower is a function that can return the current time.
// In Clock, Now() will use time.Now by default, but a Nower can be set using WithNower in NewClock
// to customize the return value for Now() in tests.
type Nower func() time.Time

// Clock abstracts important time-related concerns in the beacon chain:
// - provides a time.Now() construct that can be overridden in tests
// - GenesisTime() to know the genesis time or use genesis time determination as a synchronization point.
// - CurrentSlot: convenience conversion for current time -> slot
// (support backwards compatibility with the TimeFetcher interface)
// - GenesisValidatorsRoot: is determined at the same point as genesis time and is needed by some of the same code,
// so it is also bundled for convenience.
type Clock struct {
t time.Time
vr [32]byte
now Nower
}

// GenesisTime returns the genesis timestamp.
func (g *Clock) GenesisTime() time.Time {
return g.t
}

// GenesisValidatorsRoot returns the genesis state validator root
func (g *Clock) GenesisValidatorsRoot() [32]byte {
return g.vr
}

// CurrentSlot returns the current slot relative to the time.Time value that Clock embeds.
func (g *Clock) CurrentSlot() types.Slot {
now := g.now()
return slots.Duration(g.t, now)
}

// Now provides a value for time.Now() that can be overridden in tests.
func (g *Clock) Now() time.Time {
return g.now()
}

// ClockOpt is a functional option to change the behavior of a clock value made by NewClock.
// It is primarily intended as a way to inject an alternate time.Now() callback (WithNower) for testing.
type ClockOpt func(*Clock)

// WithNower allows tests in particular to inject an alternate implementation of time.Now (vs using system time)
func WithNower(n Nower) ClockOpt {
return func(g *Clock) {
g.now = n
}
}

// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.
func NewClock(t time.Time, vr [32]byte, opts ...ClockOpt) *Clock {
c := &Clock{
t: t,
vr: vr,
}
for _, o := range opts {
o(c)
}
if c.now == nil {
c.now = time.Now
}
return c
}

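A short usage note: WithNower pins Now() so CurrentSlot() is deterministic in tests. A sketch using only the API above (the two-slot offset is an arbitrary example, and TestClockPinnedNow is a hypothetical test name):

package startup_test

import (
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestClockPinnedNow(t *testing.T) {
	genesis := time.Unix(23, 0)
	// Freeze "now" at exactly two slots after genesis.
	offset := 2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	nower := func() time.Time { return genesis.Add(offset) }
	clock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(nower))
	// CurrentSlot derives from the injected Nower, not system time.
	require.Equal(t, primitives.Slot(2), clock.CurrentSlot())
}
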
beacon-chain/startup/clock_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package startup

import (
"testing"
"time"

"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestClock(t *testing.T) {
vr := [32]byte{}
cases := []struct {
name string
nSlots primitives.Slot
}{
{
name: "3 slots",
nSlots: 3,
},
{
name: "0 slots",
nSlots: 0,
},
{
name: "1 epoch",
nSlots: params.BeaconConfig().SlotsPerEpoch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
genesis, now := testInterval(c.nSlots)
nower := func() time.Time { return now }
cl := NewClock(genesis, vr, WithNower(nower))
require.Equal(t, genesis, cl.GenesisTime())
require.Equal(t, now, cl.Now())
require.Equal(t, c.nSlots, cl.CurrentSlot())
})
}
}

func testInterval(nSlots primitives.Slot) (time.Time, time.Time) {
oneSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
var start uint64 = 23
endOffset := oneSlot * time.Duration(nSlots)
startTime := time.Unix(int64(start), 0)
return startTime, startTime.Add(endOffset)
}
beacon-chain/startup/synchronizer.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package startup

import (
"context"

"github.com/pkg/errors"
)

var errClockSet = errors.New("refusing to change clock after it is set")

// ClockSynchronizer provides a synchronization mechanism for services that rely on the genesis time and validator root
// being known before getting to work.
type ClockSynchronizer struct {
ready chan struct{}
c *Clock
}

// ClockWaiter specifies the WaitForClock method. ClockSynchronizer works in a 1:N pattern, with 1 thread calling
// SetClock, and the others blocking on a call to WaitForClock until the expected *Clock value is set.
type ClockWaiter interface {
WaitForClock(context.Context) (*Clock, error)
}

// ClockSetter specifies the SetClock method. ClockSynchronizer works in a 1:N pattern, so in a given graph of services,
// only one service should be given the ClockSetter, and all others relying on the service's activation should use
// ClockWaiter.
type ClockSetter interface {
SetClock(c *Clock) error
}

// SetClock sets the Clock value `c` and unblocks all threads waiting for `c` via WaitForClock.
// Calling SetClock more than once will return an error, as calling this function is meant to be a signal
// that the system is ready to start.
func (w *ClockSynchronizer) SetClock(c *Clock) error {
if w.c != nil {
return errors.Wrapf(errClockSet, "when SetClock called, Clock already set to time=%d", w.c.GenesisTime().Unix())
}
w.c = c
close(w.ready)
return nil
}

// WaitForClock will block the caller until the *Clock value is available. If the provided context is canceled (eg via
// a deadline set upstream), the function will return the error given by ctx.Err().
func (w *ClockSynchronizer) WaitForClock(ctx context.Context) (*Clock, error) {
select {
case <-w.ready:
return w.c, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}

// NewClockSynchronizer initializes a single instance of ClockSynchronizer that must be used by all ClockWaiters that
// need to be synchronized to a ClockSetter (ie blockchain service).
func NewClockSynchronizer() *ClockSynchronizer {
return &ClockSynchronizer{
ready: make(chan struct{}),
}
}

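To make the 1:N contract concrete: one goroutine owns the ClockSetter role, and every waiter blocks on the same ready channel until the clock is published. A small, runnable sketch (hypothetical wiring, not code from this commit):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
)

func main() {
	cs := startup.NewClockSynchronizer()
	var wg sync.WaitGroup
	// N waiters, all parked until SetClock closes the ready channel.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			clock, err := cs.WaitForClock(context.Background())
			if err != nil {
				return
			}
			fmt.Printf("waiter %d saw genesis=%d\n", id, clock.GenesisTime().Unix())
		}(i)
	}
	// Exactly one setter; a second SetClock would fail with errClockSet.
	if err := cs.SetClock(startup.NewClock(time.Unix(0, 0), [32]byte{})); err != nil {
		panic(err)
	}
	wg.Wait()
}
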
beacon-chain/startup/synchronizer_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package startup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestSynchronizerErrOnSecondSet(t *testing.T) {
|
||||
s := NewClockSynchronizer()
|
||||
require.NoError(t, s.SetClock(NewClock(time.Now(), [32]byte{})))
|
||||
require.ErrorIs(t, s.SetClock(NewClock(time.Now(), [32]byte{})), errClockSet)
|
||||
}
|
||||
|
||||
func TestWaitForClockCanceled(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
s := NewClockSynchronizer()
|
||||
c, err := s.WaitForClock(ctx)
|
||||
require.Equal(t, true, c == nil)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
}
|
||||
|
||||
func TestWaitForClock(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
s := NewClockSynchronizer()
|
||||
var vr [32]byte
|
||||
copy(vr[:], bytesutil.PadTo([]byte("valroot"), 32))
|
||||
genesis := time.Unix(23, 0)
|
||||
later := time.Unix(42, 0)
|
||||
nower := func() time.Time { return later }
|
||||
expect := NewClock(genesis, vr, WithNower(nower))
|
||||
go func() {
|
||||
// This is just to ensure the test doesn't hang.
|
||||
// If we hit this cancellation case, then the happy path failed and the NoError assertion etc below will fail.
|
||||
time.Sleep(time.Second)
|
||||
cancel()
|
||||
}()
|
||||
go func() {
|
||||
require.NoError(t, s.SetClock(expect))
|
||||
}()
|
||||
c, err := s.WaitForClock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, later, c.Now())
|
||||
require.Equal(t, genesis, c.GenesisTime())
|
||||
require.Equal(t, vr, c.GenesisValidatorsRoot())
|
||||
}

@@ -65,7 +65,6 @@ go_library(
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/block:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -82,6 +81,7 @@ go_library(
        "//beacon-chain/p2p/encoder:go_default_library",
        "//beacon-chain/p2p/peers:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//cache/lru:go_default_library",
@@ -179,7 +179,6 @@ go_test(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
@@ -198,6 +197,7 @@ go_test(
        "//beacon-chain/p2p/peers:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",

@@ -37,7 +37,6 @@ func TestBroadcastBLSChanges(t *testing.T) {
        WithP2P(mockp2p.NewTestP2P(t)),
        WithInitialSync(&mockSync.Sync{IsSyncing: false}),
        WithChainService(chainService),
        WithStateNotifier(chainService.StateNotifier()),
        WithOperationNotifier(chainService.OperationNotifier()),
        WithBlsToExecPool(blstoexec.NewPool()),
    )
@@ -71,7 +70,6 @@ func TestRateBLSChanges(t *testing.T) {
        WithP2P(p1),
        WithInitialSync(&mockSync.Sync{IsSyncing: false}),
        WithChainService(chainService),
        WithStateNotifier(chainService.StateNotifier()),
        WithOperationNotifier(chainService.OperationNotifier()),
        WithBlsToExecPool(blstoexec.NewPool()),
    )
@@ -141,7 +139,6 @@ func TestBroadcastBLSBatch_changes_slice(t *testing.T) {
        WithP2P(p1),
        WithInitialSync(&mockSync.Sync{IsSyncing: false}),
        WithChainService(chainService),
        WithStateNotifier(chainService.StateNotifier()),
        WithOperationNotifier(chainService.OperationNotifier()),
        WithBlsToExecPool(blstoexec.NewPool()),
    )

@@ -4,8 +4,6 @@ import (
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
)

@@ -13,33 +11,29 @@ import (
const forkDigestLength = 4

// writes peer's current context for the expected payload to the stream.
func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain.ForkFetcher) error {
func writeContextToStream(objCtx []byte, stream network.Stream) error {
    // The rpc context for our v2 methods is the fork-digest of
    // the relevant payload. We write the associated fork-digest(context)
    // into the stream for the payload.
    rpcCtx, err := rpcContext(stream, chain)
    rpcCtx, err := expectRpcContext(stream)
    if err != nil {
        return err
    }
    // Exit early if there is an empty context.
    if len(rpcCtx) == 0 {
    // Exit early if an empty context is expected.
    if !rpcCtx {
        return nil
    }
    // Always choose the object's context when writing to the stream.
    if objCtx != nil {
        rpcCtx = objCtx
    }
    _, err = stream.Write(rpcCtx)
    _, err = stream.Write(objCtx)
    return err
}

// reads any attached context-bytes to the payload.
func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
    rpcCtx, err := rpcContext(stream, chain)
func readContextFromStream(stream network.Stream) ([]byte, error) {
    hasCtx, err := expectRpcContext(stream)
    if err != nil {
        return nil, err
    }
    if len(rpcCtx) == 0 {
    if !hasCtx {
        return []byte{}, nil
    }
    // Read context (fork-digest) from stream
@@ -50,26 +44,18 @@ func readContextFromStream(stream network.Stream, chain blockchain.ForkFetcher)
    return b, nil
}

// retrieve expected context depending on rpc topic schema version.
func rpcContext(stream network.Stream, chain blockchain.ForkFetcher) ([]byte, error) {
func expectRpcContext(stream network.Stream) (bool, error) {
    _, _, version, err := p2p.TopicDeconstructor(string(stream.Protocol()))
    if err != nil {
        return nil, err
        return false, err
    }
    switch version {
    case p2p.SchemaVersionV1:
        // Return empty context for a v1 method.
        return []byte{}, nil
        return false, nil
    case p2p.SchemaVersionV2:
        currFork := chain.CurrentFork()
        genRoot := chain.GenesisValidatorsRoot()
        digest, err := signing.ComputeForkDigest(currFork.CurrentVersion, genRoot[:])
        if err != nil {
            return nil, err
        }
        return digest[:], nil
        return true, nil
    default:
        return nil, errors.New("invalid version of %s registered for topic: %s")
        return false, errors.New("invalid version of %s registered for topic: %s")
    }
}
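Note the division of labor after this change: expectRpcContext only answers whether a context is expected, so a v2 writer must now derive the fork digest itself (typically from a startup.Clock) and hand it in as objCtx. A hedged sketch of that caller-side step; writeV2Response is hypothetical, while ComputeForkDigest is the same helper the removed branch used:

func writeV2Response(stream network.Stream, clock *startup.Clock, currentVersion []byte) error {
    genRoot := clock.GenesisValidatorsRoot()
    digest, err := signing.ComputeForkDigest(currentVersion, genRoot[:])
    if err != nil {
        return err
    }
    // The digest becomes the objCtx argument of writeContextToStream.
    return writeContextToStream(digest[:], stream)
}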

@@ -31,7 +31,7 @@ func TestContextWrite_NoWrites(t *testing.T) {
    assert.NoError(t, err)

    // Nothing will be written to the stream
    assert.NoError(t, writeContextToStream(nil, strm, nil))
    assert.NoError(t, writeContextToStream([]byte{}, strm))
    if util.WaitTimeout(wg, 1*time.Second) {
        t.Fatal("Did not receive stream within 1 sec")
    }
@@ -48,7 +48,7 @@ func TestContextRead_NoReads(t *testing.T) {
    wantedData := []byte{'A', 'B', 'C', 'D'}
    nPeer.BHost.SetStreamHandler(core.ProtocolID(prID), func(stream network.Stream) {
        // No Context will be read from it
        dt, err := readContextFromStream(stream, nil)
        dt, err := readContextFromStream(stream)
        assert.NoError(t, err)
        assert.Equal(t, 0, len(dt))
@@ -49,7 +49,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
    }
    // Handle different message types across forks.
    if topic == p2p.BlockSubnetTopicFormat {
        m, err = extractBlockDataType(fDigest[:], s.cfg.chain)
        m, err = extractBlockDataType(fDigest[:], s.cfg.clock)
        if err != nil {
            return nil, err
        }

@@ -15,6 +15,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    p2ptesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -81,8 +82,9 @@ func TestService_decodePubsubMessage(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}
            s := &Service{
                cfg: &config{p2p: p2ptesting.NewTestP2P(t), chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}},
                cfg: &config{p2p: p2ptesting.NewTestP2P(t), chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
            }
            if tt.topic != "" {
                if tt.input == nil {
@@ -13,6 +13,9 @@ import (
    "github.com/sirupsen/logrus"
)

var ErrNoValidDigest = errors.New("no valid digest matched")
var ErrUnrecognizedVersion = errors.New("cannot determine context bytes for unrecognized object")

var responseCodeSuccess = byte(0x00)
var responseCodeInvalidRequest = byte(0x01)
var responseCodeServerError = byte(0x02)

@@ -12,7 +12,7 @@ import (
// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
    slotTicker := slots.NewSlotTicker(s.cfg.chain.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
    slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
    for {
        select {
        // In the event of a node restart, we will still end up subscribing to the correct
@@ -42,8 +42,8 @@ func (s *Service) forkWatcher() {
// Checks if there is a fork in the next epoch and if there is
// it registers the appropriate gossip and rpc topics.
func (s *Service) registerForUpcomingFork(currEpoch primitives.Epoch) error {
    genRoot := s.cfg.chain.GenesisValidatorsRoot()
    isNextForkEpoch, err := forks.IsForkNextEpoch(s.cfg.chain.GenesisTime(), genRoot[:])
    genRoot := s.cfg.clock.GenesisValidatorsRoot()
    isNextForkEpoch, err := forks.IsForkNextEpoch(s.cfg.clock.GenesisTime(), genRoot[:])
    if err != nil {
        return errors.Wrap(err, "Could not retrieve next fork epoch")
    }
@@ -70,7 +70,7 @@ func (s *Service) registerForUpcomingFork(currEpoch primitives.Epoch) error {
// Checks if there was a fork in the previous epoch, and if there
// was then we deregister the topics from that particular fork.
func (s *Service) deregisterFromPastFork(currEpoch primitives.Epoch) error {
    genRoot := s.cfg.chain.GenesisValidatorsRoot()
    genRoot := s.cfg.clock.GenesisValidatorsRoot()
    // This method takes care of the de-registration of
    // old gossip pubsub handlers. Once we are at the epoch
    // after the fork, we de-register from all the outdated topics.
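The loop that drives these two methods, reduced to its skeleton (a sketch; C() and Done() are assumed to be the slot ticker accessors in the slots package, and error handling is abbreviated):

func (s *Service) forkWatcherSketch() {
    ticker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
    for {
        select {
        case slot := <-ticker.C():
            epoch := slots.ToEpoch(slot)
            // Subscribe to the next fork's topics ahead of the boundary,
            // then drop the previous fork's topics once it has passed.
            if err := s.registerForUpcomingFork(epoch); err != nil {
                log.WithError(err).Error("Unable to check for fork in the next epoch")
            }
            if err := s.deregisterFromPastFork(epoch); err != nil {
                log.WithError(err).Error("Unable to check for fork in the previous epoch")
            }
        case <-s.ctx.Done():
            ticker.Done()
            return
        }
    }
}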

@@ -9,6 +9,7 @@ import (
    mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -29,19 +30,21 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
    name: "no fork in the next epoch",
    svcCreator: func(t *testing.T) *Service {
        peer2peer := p2ptest.NewTestP2P(t)
        gt := time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second)
        vr := [32]byte{'A'}
        chainService := &mockChain.ChainService{
            Genesis:        time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second),
            ValidatorsRoot: [32]byte{'A'},
            Genesis:        gt,
            ValidatorsRoot: vr,
        }
        ctx, cancel := context.WithCancel(context.Background())
        r := &Service{
            ctx:    ctx,
            cancel: cancel,
            cfg: &config{
                p2p:           peer2peer,
                chain:         chainService,
                stateNotifier: chainService.StateNotifier(),
                initialSync:   &mockSync.Sync{IsSyncing: false},
                p2p:         peer2peer,
                chain:       chainService,
                clock:       startup.NewClock(gt, vr),
                initialSync: &mockSync.Sync{IsSyncing: false},
            },
            chainStarted: abool.New(),
            subHandler:   newSubTopicHandler(),
@@ -58,9 +61,11 @@
    name: "altair fork in the next epoch",
    svcCreator: func(t *testing.T) *Service {
        peer2peer := p2ptest.NewTestP2P(t)
        gt := time.Now().Add(-4 * oneEpoch())
        vr := [32]byte{'A'}
        chainService := &mockChain.ChainService{
            Genesis:        time.Now().Add(-4 * oneEpoch()),
            ValidatorsRoot: [32]byte{'A'},
            Genesis:        gt,
            ValidatorsRoot: vr,
        }
        bCfg := params.BeaconConfig().Copy()
        bCfg.AltairForkEpoch = 5
@@ -71,10 +76,10 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
            ctx:    ctx,
            cancel: cancel,
            cfg: &config{
                p2p:           peer2peer,
                chain:         chainService,
                stateNotifier: chainService.StateNotifier(),
                initialSync:   &mockSync.Sync{IsSyncing: false},
                p2p:         peer2peer,
                chain:       chainService,
                clock:       startup.NewClock(gt, vr),
                initialSync: &mockSync.Sync{IsSyncing: false},
            },
            chainStarted: abool.New(),
            subHandler:   newSubTopicHandler(),
@@ -84,7 +89,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
    currEpoch: 4,
    wantErr:   false,
    postSvcCheck: func(t *testing.T, s *Service) {
        genRoot := s.cfg.chain.GenesisValidatorsRoot()
        genRoot := s.cfg.clock.GenesisValidatorsRoot()
        digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
        assert.NoError(t, err)
        assert.Equal(t, true, s.subHandler.digestExists(digest))
@@ -115,10 +120,10 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
            ctx:    ctx,
            cancel: cancel,
            cfg: &config{
                p2p:           peer2peer,
                chain:         chainService,
                stateNotifier: chainService.StateNotifier(),
                initialSync:   &mockSync.Sync{IsSyncing: false},
                p2p:         peer2peer,
                chain:       chainService,
                clock:       startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
                initialSync: &mockSync.Sync{IsSyncing: false},
            },
            chainStarted: abool.New(),
            subHandler:   newSubTopicHandler(),
@@ -128,7 +133,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
    currEpoch: 4,
    wantErr:   false,
    postSvcCheck: func(t *testing.T, s *Service) {
        genRoot := s.cfg.chain.GenesisValidatorsRoot()
        genRoot := s.cfg.clock.GenesisValidatorsRoot()
        digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
        assert.NoError(t, err)
        assert.Equal(t, true, s.subHandler.digestExists(digest))
@@ -167,15 +172,16 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
        Genesis:        time.Now().Add(-oneEpoch()),
        ValidatorsRoot: [32]byte{'A'},
    }
    clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
    ctx, cancel := context.WithCancel(context.Background())
    r := &Service{
        ctx:    ctx,
        cancel: cancel,
        cfg: &config{
            p2p:           peer2peer,
            chain:         chainService,
            stateNotifier: chainService.StateNotifier(),
            initialSync:   &mockSync.Sync{IsSyncing: false},
            p2p:         peer2peer,
            chain:       chainService,
            clock:       clock,
            initialSync: &mockSync.Sync{IsSyncing: false},
        },
        chainStarted: abool.New(),
        subHandler:   newSubTopicHandler(),
@@ -207,6 +213,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
        Genesis:        time.Now().Add(-4 * oneEpoch()),
        ValidatorsRoot: [32]byte{'A'},
    }
    clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
    bCfg := params.BeaconConfig().Copy()
    bCfg.AltairForkEpoch = 3
    params.OverrideBeaconConfig(bCfg)
@@ -216,10 +223,10 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
        ctx:    ctx,
        cancel: cancel,
        cfg: &config{
            p2p:           peer2peer,
            chain:         chainService,
            stateNotifier: chainService.StateNotifier(),
            initialSync:   &mockSync.Sync{IsSyncing: false},
            p2p:         peer2peer,
            chain:       chainService,
            clock:       clock,
            initialSync: &mockSync.Sync{IsSyncing: false},
        },
        chainStarted: abool.New(),
        subHandler:   newSubTopicHandler(),
@@ -232,7 +239,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
    chainService.Genesis = prevGenesis
    r.registerRPCHandlersAltair()

    genRoot := r.cfg.chain.GenesisValidatorsRoot()
    genRoot := r.cfg.clock.GenesisValidatorsRoot()
    digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
    assert.NoError(t, err)
    r.registerSubscribers(0, digest)
@@ -248,7 +255,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
    currEpoch: 4,
    wantErr:   false,
    postSvcCheck: func(t *testing.T, s *Service) {
        genRoot := s.cfg.chain.GenesisValidatorsRoot()
        genRoot := s.cfg.clock.GenesisValidatorsRoot()
        digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
        assert.NoError(t, err)
        assert.Equal(t, false, s.subHandler.digestExists(digest))
@@ -281,6 +288,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
        Genesis:        time.Now().Add(-4 * oneEpoch()),
        ValidatorsRoot: [32]byte{'A'},
    }
    clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
    bCfg := params.BeaconConfig().Copy()
    bCfg.AltairForkEpoch = 1
    bCfg.BellatrixForkEpoch = 3
@@ -291,15 +299,15 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
        ctx:    ctx,
        cancel: cancel,
        cfg: &config{
            p2p:           peer2peer,
            chain:         chainService,
            stateNotifier: chainService.StateNotifier(),
            initialSync:   &mockSync.Sync{IsSyncing: false},
            p2p:         peer2peer,
            chain:       chainService,
            clock:       clock,
            initialSync: &mockSync.Sync{IsSyncing: false},
        },
        chainStarted: abool.New(),
        subHandler:   newSubTopicHandler(),
    }
    genRoot := r.cfg.chain.GenesisValidatorsRoot()
    genRoot := r.cfg.clock.GenesisValidatorsRoot()
    digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
    assert.NoError(t, err)
    r.registerSubscribers(1, digest)
@@ -315,7 +323,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
    currEpoch: 4,
    wantErr:   false,
    postSvcCheck: func(t *testing.T, s *Service) {
        genRoot := s.cfg.chain.GenesisValidatorsRoot()
        genRoot := s.cfg.clock.GenesisValidatorsRoot()
        digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
        assert.NoError(t, err)
        assert.Equal(t, false, s.subHandler.digestExists(digest))

@@ -18,7 +18,6 @@ go_library(
    deps = [
        "//async/abool:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/block:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -26,6 +25,7 @@ go_library(
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",
@@ -107,10 +107,7 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//async/abool:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p:go_default_library",
@@ -118,6 +115,7 @@ go_test(
        "//beacon-chain/p2p/peers/scorers:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/params:go_default_library",

@@ -11,6 +11,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    prysmsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v4/config/params"
@@ -86,6 +87,7 @@ type blocksFetcher struct {
    capacityWeight float64       // how remaining capacity affects peer selection
    mode           syncMode      // allows to use fetcher in different sync scenarios
    quit           chan struct{} // termination notifier
    clock          *startup.Clock
}

// peerLock restricts fetcher actions on per peer basis. Currently, used for rate limiting.

@@ -14,6 +14,7 @@ import (
    dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    p2pm "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    beaconsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v4/config/params"
@@ -548,7 +549,10 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
    defer cancel()
    fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
    fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 1*time.Second, false)
    fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    gt := time.Now()
    vr := [32]byte{}
    fetcher.chain = &mock.ChainService{Genesis: gt, ValidatorsRoot: vr}
    fetcher.clock = startup.NewClock(gt, vr)
    hook := logTest.NewGlobal()
    wg := new(sync.WaitGroup)
    wg.Add(1)
@@ -614,7 +618,10 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
    defer cancel()
    fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
    fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 5*time.Second, false)
    fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    gt := time.Now()
    vr := [32]byte{}
    fetcher.chain = &mock.ChainService{Genesis: gt, ValidatorsRoot: vr}
    fetcher.clock = startup.NewClock(gt, vr)
    start := time.Now()
    assert.NoError(t, fetcher.waitForBandwidth(p2.PeerID(), 10))
    dur := time.Since(start)
@@ -647,10 +654,10 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) {
        blk := util.NewBeaconBlock()
        blk.Block.Slot = i
        mchain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, mchain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
    }
    assert.NoError(t, stream.Close())
}
@@ -671,10 +678,10 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step+1); i += primitives.Slot(req.Step) {
        blk := util.NewBeaconBlock()
        blk.Block.Slot = i
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
    }
    assert.NoError(t, stream.Close())
}
@@ -695,16 +702,16 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := util.NewBeaconBlock()
        blk.Block.Slot = 163
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))

        blk = util.NewBeaconBlock()
        blk.Block.Slot = 162
        wsb, err = blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
        assert.NoError(t, stream.Close())
    }
},
@@ -724,17 +731,17 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := util.NewBeaconBlock()
        blk.Block.Slot = 160
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})

        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))

        blk = util.NewBeaconBlock()
        blk.Block.Slot = 160
        wsb, err = blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
        assert.NoError(t, stream.Close())
    }
},
@@ -757,19 +764,19 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    }()
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) {
        blk := util.NewBeaconBlock()
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        // Patch mid block, with invalid slot number.
        if i == req.StartSlot.Add(req.Count*req.Step/2) {
            blk.Block.Slot = req.StartSlot - 1
            wsb, err := blocks.NewSignedBeaconBlock(blk)
            require.NoError(t, err)
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
            break
        }
        blk.Block.Slot = i
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
    }
}
},
@@ -792,19 +799,19 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    }()
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) {
        blk := util.NewBeaconBlock()
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        // Patch mid block, with invalid slot number.
        if i == req.StartSlot.Add(req.Count*req.Step/2) {
            blk.Block.Slot = req.StartSlot.Add(req.Count * req.Step)
            wsb, err := blocks.NewSignedBeaconBlock(blk)
            require.NoError(t, err)
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
            break
        }
        blk.Block.Slot = i
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
    }
}
},
@@ -824,16 +831,16 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := util.NewBeaconBlock()
        blk.Block.Slot = 100
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))

        blk = util.NewBeaconBlock()
        blk.Block.Slot = 105
        wsb, err = blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
        assert.NoError(t, stream.Close())
    }
},
@@ -852,16 +859,16 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := util.NewBeaconBlock()
        blk.Block.Slot = 100
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        tor := startup.NewClock(time.Now(), [32]byte{})
        wsb, err := blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))

        blk = util.NewBeaconBlock()
        blk.Block.Slot = 103
        wsb, err = blocks.NewSignedBeaconBlock(blk)
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
        assert.NoError(t, stream.Close())
    }
},

@@ -17,6 +17,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
    p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    beaconsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v4/config/params"
@@ -210,11 +211,10 @@ func connectPeer(t *testing.T, host *p2pt.TestP2P, datum *peerData, peerStatus *
        ret = ret[:req.Count]
    }

    mChain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    for i := 0; i < len(ret); i++ {
        wsb, err := blocks.NewSignedBeaconBlock(ret[i])
        require.NoError(t, err)
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, mChain, p.Encoding(), wsb))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p.Encoding(), wsb))
    }
})
@@ -283,10 +283,9 @@ func connectPeerHavingBlocks(
        if uint64(i) >= uint64(len(blks)) {
            break
        }
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        wsb, err := blocks.NewSignedBeaconBlock(blks[i])
        require.NoError(t, err)
        require.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p.Encoding(), wsb))
        require.NoError(t, beaconsync.WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p.Encoding(), wsb))
    }
})

@@ -11,11 +11,11 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/async/abool"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/runtime"
@@ -34,11 +34,13 @@ type blockchainService interface {

// Config to set up the initial sync service.
type Config struct {
    P2P           p2p.P2P
    DB            db.ReadOnlyDatabase
    Chain         blockchainService
    StateNotifier statefeed.Notifier
    BlockNotifier blockfeed.Notifier
    P2P                 p2p.P2P
    DB                  db.ReadOnlyDatabase
    Chain               blockchainService
    StateNotifier       statefeed.Notifier
    BlockNotifier       blockfeed.Notifier
    ClockWaiter         startup.ClockWaiter
    InitialSyncComplete chan struct{}
}

// Service service.
@@ -50,6 +52,7 @@ type Service struct {
    chainStarted *abool.AtomicBool
    counter      *ratecounter.RateCounter
    genesisChan  chan time.Time
    clock        *startup.Clock
}

// NewService configures the initial sync service responsible for bringing the node up to the
@@ -66,31 +69,34 @@ func NewService(ctx context.Context, cfg *Config) *Service {
        genesisChan: make(chan time.Time),
    }

    // The reason why we have this goroutine in the constructor is to avoid a race condition
    // between services' Start method and the initialization event.
    // See https://github.com/prysmaticlabs/prysm/issues/10602 for details.
    go s.waitForStateInitialization()

    return s
}

// Start the initial sync service.
func (s *Service) Start() {
    // Wait for state initialized event.
    genesis := <-s.genesisChan
    if genesis.IsZero() {
    log.Info("Waiting for state to be initialized")
    clock, err := s.cfg.ClockWaiter.WaitForClock(s.ctx)
    if err != nil {
        log.WithError(err).Error("initial-sync failed to receive startup event")
        return
    }
    s.clock = clock
    log.Info("Received state initialized event")

    gt := clock.GenesisTime()
    if gt.IsZero() {
        log.Debug("Exiting Initial Sync Service")
        return
    }
    if genesis.After(prysmTime.Now()) {
        s.markSynced(genesis)
        log.WithField("genesisTime", genesis).Info("Genesis time has not arrived - not syncing")
    if gt.After(prysmTime.Now()) {
        s.markSynced()
        log.WithField("genesisTime", gt).Info("Genesis time has not arrived - not syncing")
        return
    }
    currentSlot := slots.Since(genesis)
    currentSlot := clock.CurrentSlot()
    if slots.ToEpoch(currentSlot) == 0 {
        log.WithField("genesisTime", genesis).Info("Chain started within the last epoch - not syncing")
        s.markSynced(genesis)
        log.WithField("genesisTime", gt).Info("Chain started within the last epoch - not syncing")
        s.markSynced()
        return
    }
    s.chainStarted.Set()
@@ -98,18 +104,18 @@ func (s *Service) Start() {
    // Are we already in sync, or close to it?
    if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
        log.Info("Already synced to the current chain head")
        s.markSynced(genesis)
        s.markSynced()
        return
    }
    s.waitForMinimumPeers()
    if err := s.roundRobinSync(genesis); err != nil {
    if err := s.roundRobinSync(gt); err != nil {
        if errors.Is(s.ctx.Err(), context.Canceled) {
            return
        }
        panic(err)
    }
    log.Infof("Synced up to slot %d", s.cfg.Chain.HeadSlot())
    s.markSynced(genesis)
    s.markSynced()
}

// Stop initial sync.
@@ -181,48 +187,8 @@ func (s *Service) waitForMinimumPeers() {
    }
}

// waitForStateInitialization makes sure that beacon node is ready to be accessed: it is either
// already properly configured or system waits up until state initialized event is triggered.
func (s *Service) waitForStateInitialization() {
    // Wait for state to be initialized.
    stateChannel := make(chan *feed.Event, 1)
    stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
    defer stateSub.Unsubscribe()
    log.Info("Waiting for state to be initialized")
    for {
        select {
        case event := <-stateChannel:
            if event.Type == statefeed.Initialized {
                data, ok := event.Data.(*statefeed.InitializedData)
                if !ok {
                    log.Error("Event feed data is not type *statefeed.InitializedData")
                    continue
                }
                log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
                s.genesisChan <- data.StartTime
                return
            }
        case <-s.ctx.Done():
            log.Debug("Context closed, exiting goroutine")
            // Send a zero time in the event we are exiting.
            s.genesisChan <- time.Time{}
            return
        case err := <-stateSub.Err():
            log.WithError(err).Error("Subscription to state notifier failed")
            // Send a zero time in the event we are exiting.
            s.genesisChan <- time.Time{}
            return
        }
    }
}

// markSynced marks node as synced and notifies feed listeners.
func (s *Service) markSynced(genesis time.Time) {
func (s *Service) markSynced() {
    s.synced.Set()
    s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.Synced,
        Data: &statefeed.SyncedData{
            StartTime: genesis,
        },
    })
    close(s.cfg.InitialSyncComplete)
}
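Since markSynced now signals completion by closing cfg.InitialSyncComplete rather than publishing a statefeed.Synced event, consumers swap their feed subscription for a plain channel receive. A minimal sketch, where ctx and syncComplete stand in for whatever the caller holds:

select {
case <-syncComplete:
    // Channel closed by markSynced: initial sync is done.
case <-ctx.Done():
    return ctx.Err()
}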

@@ -8,12 +8,10 @@ import (

    "github.com/paulbellamy/ratecounter"
    "github.com/prysmaticlabs/prysm/v4/async/abool"
    "github.com/prysmaticlabs/prysm/v4/async/event"
    mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -36,7 +34,7 @@ func TestService_InitStartStop(t *testing.T) {
    tests := []struct {
        name         string
        assert       func()
        methodRuns   func(fd *event.Feed)
        setGenesis   func() *startup.Clock
        chainService func() *mock.ChainService
    }{
        {
@@ -61,15 +59,9 @@
                ValidatorsRoot: [32]byte{},
            }
        },
        methodRuns: func(fd *event.Feed) {
            // Send valid event.
            fd.Send(&feed.Event{
                Type: statefeed.Initialized,
                Data: &statefeed.InitializedData{
                    StartTime:             time.Unix(4113849600, 0),
                    GenesisValidatorsRoot: make([]byte, 32),
                },
            })
        setGenesis: func() *startup.Clock {
            var vr [32]byte
            return startup.NewClock(time.Unix(4113849600, 0), vr)
        },
        assert: func() {
            assert.LogsContain(t, hook, "Genesis time has not arrived - not syncing")
@@ -91,15 +83,9 @@
                ValidatorsRoot: [32]byte{},
            }
        },
        methodRuns: func(fd *event.Feed) {
            // Send valid event.
            fd.Send(&feed.Event{
                Type: statefeed.Initialized,
                Data: &statefeed.InitializedData{
                    StartTime:             time.Now().Add(-5 * time.Minute),
                    GenesisValidatorsRoot: make([]byte, 32),
                },
            })
        setGenesis: func() *startup.Clock {
            var vr [32]byte
            return startup.NewClock(time.Now().Add(-5*time.Minute), vr)
        },
        assert: func() {
            assert.LogsContain(t, hook, "Chain started within the last epoch - not syncing")
@@ -124,16 +110,10 @@
                ValidatorsRoot: [32]byte{},
            }
        },
        methodRuns: func(fd *event.Feed) {
        setGenesis: func() *startup.Clock {
            futureSlot := primitives.Slot(27354)
            // Send valid event.
            fd.Send(&feed.Event{
                Type: statefeed.Initialized,
                Data: &statefeed.InitializedData{
                    StartTime:             makeGenesisTime(futureSlot),
                    GenesisValidatorsRoot: make([]byte, 32),
                },
            })
            var vr [32]byte
            return startup.NewClock(makeGenesisTime(futureSlot), vr)
        },
        assert: func() {
            assert.LogsContain(t, hook, "Starting initial chain sync...")
@@ -161,16 +141,18 @@
            mc = tt.chainService()
        }
        // Initialize feed
        notifier := &mock.MockStateNotifier{}
        gs := startup.NewClockSynchronizer()
        s := NewService(ctx, &Config{
            P2P:           p,
            Chain:         mc,
            StateNotifier: notifier,
            P2P:                 p,
            Chain:               mc,
            ClockWaiter:         gs,
            StateNotifier:       &mock.MockStateNotifier{},
            InitialSyncComplete: make(chan struct{}),
        })
        time.Sleep(500 * time.Millisecond)
        assert.NotNil(t, s)
        if tt.methodRuns != nil {
            tt.methodRuns(notifier.StateFeed())
        if tt.setGenesis != nil {
            require.NoError(t, gs.SetClock(tt.setGenesis()))
        }

        wg := &sync.WaitGroup{}
@@ -197,10 +179,11 @@

func TestService_waitForStateInitialization(t *testing.T) {
    hook := logTest.NewGlobal()
    newService := func(ctx context.Context, mc *mock.ChainService) *Service {
    newService := func(ctx context.Context, mc *mock.ChainService) (*Service, *startup.ClockSynchronizer) {
        cs := startup.NewClockSynchronizer()
        ctx, cancel := context.WithCancel(ctx)
        s := &Service{
            cfg:    &Config{Chain: mc, StateNotifier: mc.StateNotifier()},
            cfg:    &Config{Chain: mc, StateNotifier: mc.StateNotifier(), ClockWaiter: cs, InitialSyncComplete: make(chan struct{})},
            ctx:    ctx,
            cancel: cancel,
            synced: abool.New(),
@@ -208,7 +191,7 @@
            counter:     ratecounter.NewRateCounter(counterSeconds * time.Second),
            genesisChan: make(chan time.Time),
        }
        return s
        return s, cs
    }

    t.Run("no state and context close", func(t *testing.T) {
@@ -216,13 +199,11 @@
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})
        s, _ := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})
        wg := &sync.WaitGroup{}
        wg.Add(1)
        go func() {
            go s.waitForStateInitialization()
            currTime := <-s.genesisChan
            assert.Equal(t, true, currTime.IsZero())
            s.Start()
            wg.Done()
        }()
        go func() {
@@ -235,7 +216,7 @@
            t.Fatalf("Test should have exited by now, timed out")
        }
        assert.LogsContain(t, hook, "Waiting for state to be initialized")
        assert.LogsContain(t, hook, "Context closed, exiting goroutine")
        assert.LogsContain(t, hook, "initial-sync failed to receive startup event")
        assert.LogsDoNotContain(t, hook, "Subscription to state notifier failed")
    })

@@ -243,41 +224,30 @@
        defer hook.Reset()
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})

        expectedGenesisTime := time.Unix(358544700, 0)
        var receivedGenesisTime time.Time
        st, err := util.NewBeaconState()
        require.NoError(t, err)
        gt := time.Unix(int64(st.GenesisTime()), 0)
        s, gs := newService(ctx, &mock.ChainService{State: st, Genesis: gt, ValidatorsRoot: [32]byte{}})

        expectedGenesisTime := gt
        wg := &sync.WaitGroup{}
        wg.Add(1)
        go func() {
            go s.waitForStateInitialization()
            receivedGenesisTime = <-s.genesisChan
            assert.Equal(t, false, receivedGenesisTime.IsZero())
            s.Start()
            wg.Done()
        }()
        rg := func() time.Time { return gt.Add(time.Second * 12) }
        go func() {
            time.AfterFunc(500*time.Millisecond, func() {
                // Send invalid event at first.
                s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
                    Type: statefeed.Initialized,
                    Data: &statefeed.BlockProcessedData{},
                })
                // Send valid event.
                s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
                    Type: statefeed.Initialized,
                    Data: &statefeed.InitializedData{
                        StartTime:             expectedGenesisTime,
                        GenesisValidatorsRoot: make([]byte, 32),
                    },
                })
            time.AfterFunc(200*time.Millisecond, func() {
                var vr [32]byte
                require.NoError(t, gs.SetClock(startup.NewClock(expectedGenesisTime, vr, startup.WithNower(rg))))
            })
        }()

        if util.WaitTimeout(wg, time.Second*2) {
            t.Fatalf("Test should have exited by now, timed out")
        }
        assert.Equal(t, expectedGenesisTime, receivedGenesisTime)
        assert.LogsContain(t, hook, "Event feed data is not type *statefeed.InitializedData")
        assert.LogsContain(t, hook, "Waiting for state to be initialized")
        assert.LogsContain(t, hook, "Received state initialized event")
        assert.LogsDoNotContain(t, hook, "Context closed, exiting goroutine")
@@ -287,29 +257,17 @@ func TestService_waitForStateInitialization(t *testing.T) {
        defer hook.Reset()
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})
        s, gs := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})
        // Initialize mock feed
        _ = s.cfg.StateNotifier.StateFeed()

        expectedGenesisTime := time.Now().Add(60 * time.Second)
        wg := &sync.WaitGroup{}
        wg.Add(1)
        go func() {
            s.waitForStateInitialization()
            wg.Done()
        }()

        wg.Add(1)
        go func() {
            time.AfterFunc(500*time.Millisecond, func() {
                // Send valid event.
                s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
                    Type: statefeed.Initialized,
                    Data: &statefeed.InitializedData{
                        StartTime:             expectedGenesisTime,
                        GenesisValidatorsRoot: make([]byte, 32),
                    },
                })
                var vr [32]byte
                require.NoError(t, gs.SetClock(startup.NewClock(expectedGenesisTime, vr)))
            })
            s.Start()
            wg.Done()
@@ -326,11 +284,12 @@ func TestService_waitForStateInitialization(t *testing.T) {

func TestService_markSynced(t *testing.T) {
    mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    ctx, cancel := context.WithCancel(context.Background())
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    s := NewService(ctx, &Config{
        Chain:         mc,
        StateNotifier: mc.StateNotifier(),
        Chain:               mc,
        StateNotifier:       mc.StateNotifier(),
        InitialSyncComplete: make(chan struct{}),
    })
    require.NotNil(t, s)
    assert.Equal(t, false, s.chainStarted.IsSet())
@@ -340,33 +299,16 @@ func TestService_markSynced(t *testing.T) {
    s.chainStarted.Set()
    assert.ErrorContains(t, "syncing", s.Status())

    expectedGenesisTime := time.Unix(358544700, 0)
    var receivedGenesisTime time.Time

    stateChannel := make(chan *feed.Event, 1)
    stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
    defer stateSub.Unsubscribe()

    wg := &sync.WaitGroup{}
    wg.Add(1)
    go func() {
        select {
        case stateEvent := <-stateChannel:
            if stateEvent.Type == statefeed.Synced {
                data, ok := stateEvent.Data.(*statefeed.SyncedData)
                require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
                receivedGenesisTime = data.StartTime
            }
        case <-s.ctx.Done():
        }
        wg.Done()
        s.markSynced()
    }()
    s.markSynced(expectedGenesisTime)

    if util.WaitTimeout(wg, time.Second*2) {
        t.Fatalf("Test should have exited by now, timed out")
    select {
    case <-s.cfg.InitialSyncComplete:
    case <-ctx.Done():
        require.NoError(t, ctx.Err()) // this is an error because it means initial sync complete failed to close
    }
    assert.Equal(t, expectedGenesisTime, receivedGenesisTime)

    assert.Equal(t, false, s.Syncing())
}

@@ -459,9 +401,7 @@ func TestService_Initialized(t *testing.T) {
}

func TestService_Synced(t *testing.T) {
    s := NewService(context.Background(), &Config{
        StateNotifier: &mock.MockStateNotifier{},
    })
    s := NewService(context.Background(), &Config{})
    s.synced.UnSet()
    assert.Equal(t, false, s.Synced())
    s.synced.Set()

@@ -130,7 +130,7 @@ var (
func (s *Service) updateMetrics() {
    // do not update metrics if genesis time
    // has not been initialized
    if s.cfg.chain.GenesisTime().IsZero() {
    if s.cfg.clock.GenesisTime().IsZero() {
        return
    }
    // We update the dynamic subnet topics.
@@ -138,8 +138,8 @@ func (s *Service) updateMetrics() {
    if err != nil {
        log.WithError(err).Debugf("Could not compute fork digest")
    }
    indices := s.aggregatorSubnetIndices(s.cfg.chain.CurrentSlot())
    syncIndices := cache.SyncSubnetIDs.GetAllSubnets(slots.ToEpoch(s.cfg.chain.CurrentSlot()))
    indices := s.aggregatorSubnetIndices(s.cfg.clock.CurrentSlot())
    syncIndices := cache.SyncSubnetIDs.GetAllSubnets(slots.ToEpoch(s.cfg.clock.CurrentSlot()))
    attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})]
    syncTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SyncCommitteeMessage{})]
    attTopic += s.cfg.p2p.Encoding().ProtocolSuffix()

@@ -4,7 +4,6 @@ import (
    "github.com/prysmaticlabs/prysm/v4/async/event"
    blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
@@ -13,6 +12,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
)

@@ -88,13 +88,6 @@ func WithInitialSync(initialSync Checker) Option {
    }
}

func WithStateNotifier(stateNotifier statefeed.Notifier) Option {
    return func(s *Service) error {
        s.cfg.stateNotifier = stateNotifier
        return nil
    }
}

func WithBlockNotifier(blockNotifier blockfeed.Notifier) Option {
    return func(s *Service) error {
        s.cfg.blockNotifier = blockNotifier
@@ -136,3 +129,17 @@ func WithExecutionPayloadReconstructor(r execution.ExecutionPayloadReconstructor
        return nil
    }
}

func WithClockWaiter(cw startup.ClockWaiter) Option {
    return func(s *Service) error {
        s.clockWaiter = cw
        return nil
    }
}

func WithInitialSyncComplete(c chan struct{}) Option {
    return func(s *Service) error {
        s.initialSyncComplete = c
        return nil
    }
}
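Put together with the removal of WithStateNotifier, a sync service is now assembled roughly like this (a sketch; the option list is abbreviated and the variable names are placeholders):

gs := startup.NewClockSynchronizer()
complete := make(chan struct{})
svc := NewService(ctx,
    WithP2P(p2pService),
    WithChainService(chainService),
    WithInitialSync(initialSync),
    WithClockWaiter(gs),               // replaces the state-feed Initialized subscription
    WithInitialSyncComplete(complete), // replaces the Synced event
)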
|
||||
|
||||
@@ -46,7 +46,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
|
||||
// Before a node processes pending attestations queue, it verifies
|
||||
// the attestations in the queue are still valid. Attestations will
|
||||
// be deleted from the queue if invalid (ie. getting staled from falling too many slots behind).
|
||||
s.validatePendingAtts(ctx, s.cfg.chain.CurrentSlot())
|
||||
s.validatePendingAtts(ctx, s.cfg.clock.CurrentSlot())
|
||||
|
||||
s.pendingAttsLock.RLock()
|
||||
roots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
|
||||
@@ -76,7 +76,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
|
||||
} else {
|
||||
// Pending attestation's missing block has not arrived yet.
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentSlot": s.cfg.chain.CurrentSlot(),
|
||||
"currentSlot": s.cfg.clock.CurrentSlot(),
|
||||
"attSlot": attestations[0].Message.Aggregate.Data.Slot,
|
||||
"attCount": len(attestations),
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
@@ -42,8 +43,9 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{})
|
||||
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
r := &Service{
|
||||
cfg: &config{p2p: p1, beaconDB: db, chain: &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}},
|
||||
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
@@ -105,20 +107,22 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
|
||||
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
|
||||
|
||||
chain := &mock.ChainService{Genesis: time.Now(),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
|
||||
Epoch: 0,
|
||||
},
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cfg: &config{
|
||||
p2p: p1,
|
||||
beaconDB: db,
|
||||
chain: &mock.ChainService{Genesis: time.Now(),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
|
||||
Epoch: 0,
|
||||
},
|
||||
},
|
||||
attPool: attestations.NewPool(),
|
||||
chain: chain,
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
attPool: attestations.NewPool(),
|
||||
},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenUnAggregatedAttestationCache: lruwrpr.New(10),
|
||||
@@ -147,14 +151,16 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)

s, _ := util.DeterministicGenesisState(t, 256)
chain := &mock.ChainService{
State: s,
Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
r := &Service{
cfg: &config{
p2p: p1,
beaconDB: db,
chain: &mock.ChainService{
State: s,
Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{Root: make([]byte, 32)}},
attPool: attestations.NewPool(),
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
@@ -224,18 +230,20 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {

require.NoError(t, s.SetGenesisTime(uint64(time.Now().Unix())))
ctx, cancel := context.WithCancel(context.Background())
chain2 := &mock.ChainService{Genesis: time.Now(),
State: s,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
}}
r = &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
beaconDB: db,
chain: &mock.ChainService{Genesis: time.Now(),
State: s,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
}},
attPool: attestations.NewPool(),
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenUnAggregatedAttestationCache: lruwrpr.New(10),
@@ -305,20 +313,22 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {

require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))

chain := &mock.ChainService{Genesis: time.Now(),
DB: db,
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
}}
ctx, cancel := context.WithCancel(context.Background())
r := &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
beaconDB: db,
chain: &mock.ChainService{Genesis: time.Now(),
DB: db,
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
}},
attPool: attestations.NewPool(),
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
attPool: attestations.NewPool(),
},
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
seenAggregatedAttestationCache: lruwrpr.New(10),

@@ -37,7 +37,7 @@ func (s *Service) processPendingBlocksQueue() {
locker := new(sync.Mutex)
async.RunEvery(s.ctx, processPendingBlocksPeriod, func() {
// Don't process the pending blocks if genesis time has not been set. The chain is not ready.
if !s.isGenesisTimeSet() {
if !s.chainIsStarted() {
return
}
locker.Lock()
@@ -69,7 +69,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
for _, slot := range ss {
// process the blocks during their respective slot.
// otherwise wait for the right slot to process the block.
if slot > s.cfg.chain.CurrentSlot() {
if slot > s.cfg.clock.CurrentSlot() {
continue
}

@@ -445,12 +445,6 @@ func (s *Service) addPendingBlockToCache(b interfaces.ReadOnlySignedBeaconBlock)
return nil
}

// Returns true if the genesis time has been set in chain service.
// Without the genesis time, the chain does not start.
func (s *Service) isGenesisTimeSet() bool {
return s.cfg.chain.GenesisTime().Unix() != 0
}

// This converts input string to slot.
func cacheKeyToSlot(s string) primitives.Slot {
b := []byte(s)

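For context on the replacement predicate: chainIsStarted keys off the chainStarted flag (the abool seen in the test setup earlier) rather than probing the chain service for a non-zero genesis time. A sketch under that assumption; the one-line body below is inferred, not quoted from the commit:

// chainIsStarted reports whether the genesis event has fired. The pending
// blocks queue stays idle until then, replacing the old genesis-time probe.
func (s *Service) chainIsStarted() bool {
	return s.chainStarted.IsSet()
}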
@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -51,6 +52,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
Epoch: 0,
},
},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
stateGen: stategen.New(db, doublylinkedtree.New()),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
@@ -123,6 +125,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) {
Epoch: 0,
},
},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
stateGen: stategen.New(db, doublylinkedtree.New()),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
@@ -196,6 +199,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T)
},
ReceiveBlockMockErr: execution.ErrHTTPTimeout,
},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
stateGen: stategen.New(db, fcs),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
@@ -323,6 +327,7 @@ func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) {
Epoch: 0,
},
},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
stateGen: stategen.New(db, doublylinkedtree.New()),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
@@ -391,6 +396,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
Root: make([]byte, 32),
},
},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
stateGen: stategen.New(db, doublylinkedtree.New()),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
@@ -583,18 +589,20 @@ func TestService_BatchRootRequest(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

chain := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, 32),
},
ValidatorsRoot: [32]byte{},
Genesis: time.Now(),
}
r := &Service{
cfg: &config{
p2p: p1,
beaconDB: db,
chain: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 1,
Root: make([]byte, 32),
},
ValidatorsRoot: [32]byte{},
Genesis: time.Now(),
},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
@@ -713,6 +721,7 @@ func TestService_ProcessPendingBlockOnCorrectSlot(t *testing.T) {
p2p: p1,
beaconDB: db,
chain: &mockChain,
clock: startup.NewClock(mockChain.Genesis, mockChain.ValidatorsRoot),
stateGen: stategen.New(db, fcs),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),

@@ -33,7 +33,7 @@ type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error

// registerRPCHandlers for p2p RPC.
func (s *Service) registerRPCHandlers() {
currEpoch := slots.ToEpoch(s.cfg.chain.CurrentSlot())
currEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
// Register V2 handlers if we are past altair fork epoch.
if currEpoch >= params.BeaconConfig().AltairForkEpoch {
s.registerRPC(

@@ -228,7 +228,7 @@ func (s *Service) validateRangeRequest(r *pb.BeaconBlocksByRangeRequest) error {
// Add a buffer for possible large range requests from nodes syncing close to the
// head of the chain.
buffer := rangeLimit * 2
highestExpectedSlot := s.cfg.chain.CurrentSlot().Add(uint64(buffer))
highestExpectedSlot := s.cfg.clock.CurrentSlot().Add(uint64(buffer))

// Ensure all request params are within appropriate bounds
if count == 0 || count > maxRequestBlocks {

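The bound being enforced above: a requested range may reach at most two range-limits past the clock's current slot, leaving headroom for peers syncing close to the head. The same check condensed into a hypothetical free function (withinRequestBounds is not part of the commit; primitives.Slot and its Add method are as used above):

// withinRequestBounds mirrors the range check in isolation: the request's
// start slot may be at most rangeLimit*2 slots beyond the current slot.
func withinRequestBounds(current, start primitives.Slot, rangeLimit uint64) bool {
	highestExpected := current.Add(rangeLimit * 2)
	return start <= highestExpected
}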
@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -63,7 +64,13 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
}

// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r := &Service{
cfg: &config{
p2p: p1, beaconDB: d, chain: &chainMock.ChainService{},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
},
rateLimiter: newRateLimiter(p1),
}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false)
@@ -125,7 +132,8 @@ func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) {
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot))

// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false)
@@ -239,6 +247,7 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) {
beaconDB: d,
chain: &chainMock.ChainService{},
executionPayloadReconstructor: mockEngine,
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
},
rateLimiter: newRateLimiter(p1),
}
@@ -309,7 +318,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
}

// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false)
@@ -374,7 +384,7 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
prevRoot = rt
}

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: startup.NewClock(time.Unix(0, 0), [32]byte{})}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)
@@ -465,7 +475,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

capacity := int64(flags.Get().BlockBatchLimit * 3)
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}

pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
@@ -491,7 +502,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

capacity := int64(flags.Get().BlockBatchLimit * 3)
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}

pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
@@ -521,7 +533,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false)
@@ -552,11 +565,13 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
slotsSinceGenesis := primitives.Slot(1000)
offset := int64(slotsSinceGenesis.Mul(params.BeaconConfig().SecondsPerSlot))
chain := &chainMock.ChainService{
Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)),
}
r := &Service{
cfg: &config{
chain: &chainMock.ChainService{
Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)),
},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
}

@@ -720,7 +735,8 @@ func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 448,
@@ -888,7 +904,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
@@ -919,7 +936,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
@@ -954,7 +972,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
@@ -989,7 +1008,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
@@ -1029,7 +1049,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,

@@ -19,7 +19,7 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()

_, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.chain, s.cfg.p2p, id, blockRoots, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
_, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, id, blockRoots, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return err

@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -50,7 +51,7 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
blkRoots = append(blkRoots, root)
}

r := &Service{cfg: &config{p2p: p1, beaconDB: d}, rateLimiter: newRateLimiter(p1)}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: startup.NewClock(time.Unix(0, 0), [32]byte{})}, rateLimiter: newRateLimiter(p1)}
r.cfg.chain = &mock.ChainService{ValidatorsRoot: [32]byte{}}
pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
topic := string(pcl)
@@ -148,8 +149,9 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload(t *testi
p2p: p1,
beaconDB: d,
executionPayloadReconstructor: mockEngine,
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
}, rateLimiter: newRateLimiter(p1)}
r.cfg.chain = &mock.ChainService{ValidatorsRoot: [32]byte{}}
pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)
@@ -204,16 +206,18 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {

expectedRoots := p2pTypes.BeaconBlockByRootsReq{blockBRoot, blockARoot}

chain := &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: blockARoot[:],
Genesis: time.Now(),
ValidatorsRoot: [32]byte{},
}
r := &Service{
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: blockARoot[:],
Genesis: time.Now(),
ValidatorsRoot: [32]byte{},
},
p2p: p1,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),

@@ -20,42 +20,48 @@ import (
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func (s *Service) chunkBlockWriter(stream libp2pcore.Stream, blk interfaces.ReadOnlySignedBeaconBlock) error {
SetStreamWriteDeadline(stream, defaultWriteDuration)
return WriteBlockChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), blk)
return WriteBlockChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), blk)
}

// WriteBlockChunk writes block chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, blk interfaces.ReadOnlySignedBeaconBlock) error {
func WriteBlockChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, blk interfaces.ReadOnlySignedBeaconBlock) error {
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
var obtainedCtx []byte

valRoot := tor.GenesisValidatorsRoot()
switch blk.Version() {
case version.Phase0:
valRoot := chain.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().GenesisEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Altair:
valRoot := chain.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Bellatrix:
valRoot := chain.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Capella:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
default:
return errors.Wrapf(ErrUnrecognizedVersion, "block version %d is not recognized", blk.Version())
}

if err := writeContextToStream(obtainedCtx, stream, chain); err != nil {
if err := writeContextToStream(obtainedCtx, stream); err != nil {
return err
}
_, err := encoding.EncodeWithMaxLength(stream, blk)
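The switch above encodes one rule: each block version maps to the fork digest of its activation epoch, computed against the oracle's genesis validators root, and that digest becomes the chunk's context bytes. The same dispatch condensed into a table-driven sketch; contextBytes is a hypothetical helper, not part of the commit:

// contextBytes returns the <context-bytes> for a block of the given version.
// The digest commits to both the fork version active at that epoch and the
// genesis validators root, so both peers must agree on chain identity.
func contextBytes(blkVersion int, valRoot [32]byte) ([]byte, error) {
	cfg := params.BeaconConfig()
	epochByVersion := map[int]primitives.Epoch{
		version.Phase0:    cfg.GenesisEpoch,
		version.Altair:    cfg.AltairForkEpoch,
		version.Bellatrix: cfg.BellatrixForkEpoch,
		version.Capella:   cfg.CapellaForkEpoch,
	}
	epoch, ok := epochByVersion[blkVersion]
	if !ok {
		return nil, errors.Wrapf(ErrUnrecognizedVersion, "block version %d is not recognized", blkVersion)
	}
	digest, err := forks.ForkDigestFromEpoch(epoch, valRoot[:])
	if err != nil {
		return nil, err
	}
	return digest[:], nil
}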
@@ -64,18 +70,18 @@ func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher

// ReadChunkedBlock handles each response chunk that is sent by the
// peer and converts it into a beacon block.
func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider, isFirstChunk bool) (interfaces.ReadOnlySignedBeaconBlock, error) {
func ReadChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOracle, p2p p2p.EncodingProvider, isFirstChunk bool) (interfaces.ReadOnlySignedBeaconBlock, error) {
// Handle deadlines differently for first chunk
if isFirstChunk {
return readFirstChunkedBlock(stream, chain, p2p)
return readFirstChunkedBlock(stream, tor, p2p)
}

return readResponseChunk(stream, chain, p2p)
return readResponseChunk(stream, tor, p2p)
}

// readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to
// it.
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) {
func readFirstChunkedBlock(stream libp2pcore.Stream, tor blockchain.TemporalOracle, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) {
code, errMsg, err := ReadStatusCode(stream, p2p.Encoding())
if err != nil {
return nil, err
@@ -83,11 +89,11 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetche
if code != 0 {
return nil, errors.New(errMsg)
}
rpcCtx, err := readContextFromStream(stream, chain)
rpcCtx, err := readContextFromStream(stream)
if err != nil {
return nil, err
}
blk, err := extractBlockDataType(rpcCtx, chain)
blk, err := extractBlockDataType(rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -97,7 +103,7 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ForkFetche

// readResponseChunk reads the response from the stream and decodes it into the
// provided message type.
func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) {
func readResponseChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, p2p p2p.EncodingProvider) (interfaces.ReadOnlySignedBeaconBlock, error) {
SetStreamReadDeadline(stream, respTimeout)
code, errMsg, err := readStatusCodeNoDeadline(stream, p2p.Encoding())
if err != nil {
@@ -107,11 +113,11 @@ func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p
return nil, errors.New(errMsg)
}
// No-op for now with the rpc context.
rpcCtx, err := readContextFromStream(stream, chain)
rpcCtx, err := readContextFromStream(stream)
if err != nil {
return nil, err
}
blk, err := extractBlockDataType(rpcCtx, chain)
blk, err := extractBlockDataType(rpcCtx, tor)
if err != nil {
return nil, err
}
@@ -119,7 +125,7 @@ func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ForkFetcher, p
return blk, err
}

func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfaces.ReadOnlySignedBeaconBlock, error) {
func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfaces.ReadOnlySignedBeaconBlock, error) {
if len(digest) == 0 {
bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
@@ -130,7 +136,7 @@ func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfac
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := chain.GenesisValidatorsRoot()
vRoot := tor.GenesisValidatorsRoot()
for k, blkFunc := range types.BlockMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
@@ -140,5 +146,5 @@ func extractBlockDataType(digest []byte, chain blockchain.ForkFetcher) (interfac
return blkFunc()
}
}
return nil, errors.New("no valid digest matched")
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract block data type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}

@@ -96,7 +96,7 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code p2ptypes.RPCGoodb
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()

topic, err := p2p.TopicFromMessage(p2p.GoodbyeMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot()))
topic, err := p2p.TopicFromMessage(p2p.GoodbyeMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
if err != nil {
return err
}

@@ -12,6 +12,7 @@ import (
db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
@@ -153,11 +154,13 @@ func TestSendGoodbye_SendsMessage(t *testing.T) {

// Set up a head state in the database with data we expect.
d := db.SetupDB(t)
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}
r := &Service{
cfg: &config{
beaconDB: d,
p2p: p1,
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p1),
}
@@ -198,11 +201,13 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {

// Set up a head state in the database with data we expect.
d := db.SetupDB(t)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
r := &Service{
cfg: &config{
beaconDB: d,
p2p: p1,
chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p1),
}

@@ -89,7 +89,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()

topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot()))
topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
if err != nil {
return nil, err
}
@@ -107,12 +107,12 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, errors.New(errMsg)
}
valRoot := s.cfg.chain.GenesisValidatorsRoot()
rpcCtx, err := forks.ForkDigestFromEpoch(slots.ToEpoch(s.cfg.chain.CurrentSlot()), valRoot[:])
valRoot := s.cfg.clock.GenesisValidatorsRoot()
rpcCtx, err := forks.ForkDigestFromEpoch(slots.ToEpoch(s.cfg.clock.CurrentSlot()), valRoot[:])
if err != nil {
return nil, err
}
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.chain)
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.clock)
if err != nil {
return nil, err
}
@@ -134,7 +134,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
return msg, nil
}

func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (metadata.Metadata, error) {
func extractMetaDataType(digest []byte, tor blockchain.TemporalOracle) (metadata.Metadata, error) {
if len(digest) == 0 {
mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
@@ -145,7 +145,7 @@ func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (meta
if len(digest) != forkDigestLength {
return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := chain.GenesisValidatorsRoot()
vRoot := tor.GenesisValidatorsRoot()
for k, mdFunc := range types.MetaDataMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
@@ -155,5 +155,5 @@ func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (meta
return mdFunc(), nil
}
}
return nil, errors.New("no valid digest matched")
return nil, errors.Wrapf(ErrNoValidDigest, "could not extract metadata type, saw digest=%#x, genesis=%v, vr=%#x", digest, tor.GenesisTime(), tor.GenesisValidatorsRoot())
}

@@ -15,6 +15,7 @@ import (
db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
@@ -90,12 +91,14 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) {
})

// Set up a head state in the database with data we expect.
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
d := db.SetupDB(t)
r := &Service{
cfg: &config{
beaconDB: d,
p2p: p1,
chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p1),
}
@@ -158,20 +161,24 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {

// Set up a head state in the database with data we expect.
d := db.SetupDB(t)
chain := &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}}
r := &Service{
cfg: &config{
beaconDB: d,
p2p: p1,
chain: &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p1),
}

chain2 := &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}}
r2 := &Service{
cfg: &config{
beaconDB: d,
p2p: p2,
chain: &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}},
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p2),
}
@@ -236,7 +243,7 @@ func TestExtractMetaDataType(t *testing.T) {

type args struct {
digest []byte
chain blockchain.ChainInfoFetcher
clock blockchain.TemporalOracle
}
tests := []struct {
name string
@@ -248,7 +255,7 @@ func TestExtractMetaDataType(t *testing.T) {
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
@@ -257,7 +264,7 @@ func TestExtractMetaDataType(t *testing.T) {
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: nil,
wantErr: true,
@@ -266,7 +273,7 @@ func TestExtractMetaDataType(t *testing.T) {
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: nil,
wantErr: true,
@@ -275,7 +282,7 @@ func TestExtractMetaDataType(t *testing.T) {
name: "genesis fork version",
args: args{
digest: genDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
@@ -284,7 +291,7 @@ func TestExtractMetaDataType(t *testing.T) {
name: "altair fork version",
args: args{
digest: altairDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
clock: startup.NewClock(time.Now(), [32]byte{}),
},
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
wantErr: false,
@@ -292,7 +299,7 @@ func TestExtractMetaDataType(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractMetaDataType(tt.args.digest, tt.args.chain)
got, err := extractMetaDataType(tt.args.digest, tt.args.clock)
if (err != nil) != tt.wantErr {
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
return

@@ -78,7 +78,7 @@ func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
defer cancel()

metadataSeq := primitives.SSZUint64(s.cfg.p2p.MetadataSeq())
topic, err := p2p.TopicFromMessage(p2p.PingMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot()))
topic, err := p2p.TopicFromMessage(p2p.PingMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
if err != nil {
return err
}

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
@@ -97,11 +98,13 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {

// Set up a head state in the database with data we expect.
d := db.SetupDB(t)
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}
r := &Service{
cfg: &config{
beaconDB: d,
p2p: p1,
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p1),
}
@@ -112,11 +115,13 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {
p2.Peers().Add(new(enr.Record), p1.BHost.ID(), p1.BHost.Addrs()[0], network.DirUnknown)
p2.Peers().SetMetadata(p1.BHost.ID(), p1.LocalMetadata)

chain2 := &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}
r2 := &Service{
cfg: &config{
beaconDB: d,
p2p: p2,
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()},
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
},
rateLimiter: newRateLimiter(p2),
}

@@ -25,10 +25,10 @@ type BeaconBlockProcessor func(block interfaces.ReadOnlySignedBeaconBlock) error

// SendBeaconBlocksByRangeRequest sends BeaconBlocksByRange and returns fetched blocks, if any.
func SendBeaconBlocksByRangeRequest(
ctx context.Context, chain blockchain.ForkFetcher, p2pProvider p2p.SenderEncoder, pid peer.ID,
ctx context.Context, tor blockchain.TemporalOracle, p2pProvider p2p.SenderEncoder, pid peer.ID,
req *pb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(chain.CurrentSlot()))
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(tor.CurrentSlot()))
if err != nil {
return nil, err
}
@@ -50,7 +50,7 @@ func SendBeaconBlocksByRangeRequest(
var prevSlot primitives.Slot
for i := uint64(0); ; i++ {
isFirstChunk := i == 0
blk, err := ReadChunkedBlock(stream, chain, p2pProvider, isFirstChunk)
blk, err := ReadChunkedBlock(stream, tor, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
@@ -87,10 +87,10 @@ func SendBeaconBlocksByRangeRequest(

// SendBeaconBlocksByRootRequest sends BeaconBlocksByRoot and returns fetched blocks, if any.
func SendBeaconBlocksByRootRequest(
ctx context.Context, chain blockchain.ChainInfoFetcher, p2pProvider p2p.P2P, pid peer.ID,
ctx context.Context, clock blockchain.TemporalOracle, p2pProvider p2p.P2P, pid peer.ID,
req *p2ptypes.BeaconBlockByRootsReq, blockProcessor BeaconBlockProcessor,
) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRootsMessageName, slots.ToEpoch(chain.CurrentSlot()))
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRootsMessageName, slots.ToEpoch(clock.CurrentSlot()))
if err != nil {
return nil, err
}
@@ -115,7 +115,7 @@ func SendBeaconBlocksByRootRequest(
break
}
isFirstChunk := i == 0
blk, err := ReadChunkedBlock(stream, chain, p2pProvider, isFirstChunk)
blk, err := ReadChunkedBlock(stream, clock, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}

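Call sites now need only something satisfying blockchain.TemporalOracle for topic selection and chunk decoding; in practice that is the node's startup clock, as the rewritten tests below show. A usage sketch with placeholder request values; fetchRange is a hypothetical wrapper, not part of the commit:

// fetchRange requests a fixed window of blocks from a peer. The oracle
// supplies CurrentSlot for topic selection; no full chain service is needed.
func fetchRange(ctx context.Context, tor blockchain.TemporalOracle, p p2p.SenderEncoder, pid peer.ID) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
	req := &pb.BeaconBlocksByRangeRequest{StartSlot: 1, Count: 64, Step: 1}
	return SendBeaconBlocksByRangeRequest(ctx, tor, p, pid, req, nil)
}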
@@ -9,10 +9,10 @@ import (
"time"

"github.com/libp2p/go-libp2p/core/network"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -35,8 +35,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
p1.Connect(bogusPeer)

req := &ethpb.BeaconBlocksByRangeRequest{}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, bogusPeer.PeerID(), req, nil)
_, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, bogusPeer.PeerID(), req, nil)
assert.ErrorContains(t, "protocols not supported", err)
})

@@ -83,10 +82,9 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
if uint64(i) >= uint64(len(knownBlocks)) {
break
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[i])
require.NoError(t, err)
err = WriteBlockChunk(stream, chain, p2pProvider.Encoding(), wsb)
err = WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p2pProvider.Encoding(), wsb)
if err != nil && err.Error() != network.ErrReset.Error() {
require.NoError(t, err)
}
@@ -105,8 +103,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 128, len(blocks))
})
@@ -124,8 +121,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Step: 1,
}
blocksFromProcessor := make([]interfaces.ReadOnlySignedBeaconBlock, 0)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
blocksFromProcessor = append(blocksFromProcessor, block)
return nil
})
@@ -147,8 +143,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Step: 1,
}
errFromProcessor := errors.New("processor error")
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
_, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
return errFromProcessor
})
assert.ErrorContains(t, errFromProcessor.Error(), err)
@@ -166,8 +161,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 128, len(blocks))

@@ -178,7 +172,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
cfg.MaxRequestBlocks = maxRequestBlocks
params.OverrideBeaconNetworkConfig(cfg)
}()
blocks, err = SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
blocks, err = SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
// Since ssz checks the boundaries, and doesn't normally allow to send requests bigger than
// the max request size, we are updating max request size dynamically. Even when updated dynamically,
// no more than max request size of blocks is expected on return.
@@ -209,8 +203,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, expectedErr.Error(), err)
assert.Equal(t, 0, len(blocks))
})
@@ -238,9 +231,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
if uint64(i) >= uint64(len(knownBlocks)) {
break
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[i])
err = WriteBlockChunk(stream, chain, p2.Encoding(), wsb)
err = WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p2.Encoding(), wsb)
if err != nil && err.Error() != network.ErrReset.Error() {
require.NoError(t, err)
}
@@ -252,8 +244,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, ErrInvalidFetchedData.Error(), err)
assert.Equal(t, 0, len(blocks))

@@ -282,10 +273,9 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
if uint64(i) >= uint64(len(knownBlocks)) {
break
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[i])
require.NoError(t, err)
err = WriteBlockChunk(stream, chain, p2.Encoding(), wsb)
err = WriteBlockChunk(stream, startup.NewClock(time.Now(), [32]byte{}), p2.Encoding(), wsb)
if err != nil && err.Error() != network.ErrReset.Error() {
require.NoError(t, err)
}
@@ -297,8 +287,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 10,
}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
blocks, err := SendBeaconBlocksByRangeRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, ErrInvalidFetchedData.Error(), err)
assert.Equal(t, 0, len(blocks))

@@ -327,8 +316,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
p1.Connect(bogusPeer)

req := &p2pTypes.BeaconBlockByRootsReq{}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, bogusPeer.PeerID(), req, nil)
_, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, bogusPeer.PeerID(), req, nil)
assert.ErrorContains(t, "protocols not supported", err)
})

@@ -377,8 +365,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
p2.SetStreamHandler(pcl, knownBlocksProvider(p2, nil))

req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]}
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 2, len(blocks))
})
@@ -392,8 +379,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
// No error from block processor.
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]}
blocksFromProcessor := make([]interfaces.ReadOnlySignedBeaconBlock, 0)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
blocksFromProcessor = append(blocksFromProcessor, block)
return nil
})
@@ -411,8 +397,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
// Send error from block processor.
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]}
errFromProcessor := errors.New("processor error")
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
_, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
return errFromProcessor
})
assert.ErrorContains(t, errFromProcessor.Error(), err)
@@ -426,8 +411,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
|
||||
|
||||
// No cap on max roots.
|
||||
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]}
|
||||
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
|
||||
clock := startup.NewClock(time.Now(), [32]byte{})
|
||||
blocks, err := SendBeaconBlocksByRootRequest(ctx, clock, p1, p2.PeerID(), req, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 4, len(blocks))
|
||||
|
||||
@@ -438,7 +423,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
|
||||
cfg.MaxRequestBlocks = maxRequestBlocks
|
||||
params.OverrideBeaconNetworkConfig(cfg)
|
||||
}()
|
||||
blocks, err = SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
blocks, err = SendBeaconBlocksByRootRequest(ctx, clock, p1, p2.PeerID(), req, func(block interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
// Since ssz checks the boundaries, and doesn't normally allow to send requests bigger than
|
||||
// the max request size, we are updating max request size dynamically. Even when updated dynamically,
|
||||
// no more than max request size of blocks is expected on return.
|
||||
@@ -465,8 +450,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
|
||||
}))
|
||||
|
||||
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]}
|
||||
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
|
||||
blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
|
||||
assert.ErrorContains(t, expectedErr.Error(), err)
|
||||
assert.Equal(t, 0, len(blocks))
|
||||
})
|
||||
@@ -486,8 +470,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
|
||||
}))
|
||||
|
||||
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]}
|
||||
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
|
||||
blocks, err := SendBeaconBlocksByRootRequest(ctx, startup.NewClock(time.Now(), [32]byte{}), p1, p2.PeerID(), req, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, len(blocks))
|
||||
})
|
||||
|
||||
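The pattern above repeats across these tests: the helper now takes a *startup.Clock where it previously took the mock chain service. A minimal sketch of the new call shape, assuming a test context where a zero validators root is an acceptable placeholder:

    // Sketch: building the clock dependency SendBeaconBlocksByRootRequest now expects.
    clock := startup.NewClock(time.Now(), [32]byte{})
    blocks, err := SendBeaconBlocksByRootRequest(ctx, clock, p1, p2.PeerID(), req,
        func(block interfaces.ReadOnlySignedBeaconBlock) error {
            // Per-block processing hook; returning an error aborts the stream.
            return nil
        })
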
@@ -93,7 +93,7 @@ func (s *Service) resyncIfBehind() {
// Check if the current node is more than 1 epoch behind.
if highestEpoch > (syncedEpoch + 1) {
log.WithFields(logrus.Fields{
"currentEpoch": slots.ToEpoch(s.cfg.chain.CurrentSlot()),
"currentEpoch": slots.ToEpoch(s.cfg.clock.CurrentSlot()),
"syncedEpoch": syncedEpoch,
"peersEpoch": highestEpoch,
}).Info("Fallen behind peers; reverting to initial sync to catch up")
@@ -110,7 +110,7 @@ func (s *Service) resyncIfBehind() {
// shouldReSync returns true if the node is not syncing and falls behind two epochs.
func (s *Service) shouldReSync() bool {
syncedEpoch := slots.ToEpoch(s.cfg.chain.HeadSlot())
currentEpoch := slots.ToEpoch(s.cfg.chain.CurrentSlot())
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
prevEpoch := primitives.Epoch(0)
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
@@ -140,7 +140,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
HeadRoot: headRoot,
HeadSlot: s.cfg.chain.HeadSlot(),
}
topic, err := p2p.TopicFromMessage(p2p.StatusMessageName, slots.ToEpoch(s.cfg.chain.CurrentSlot()))
topic, err := p2p.TopicFromMessage(p2p.StatusMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
if err != nil {
return err
}
@@ -288,7 +288,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
if !bytes.Equal(forkDigest[:], msg.ForkDigest) {
return p2ptypes.ErrWrongForkDigestVersion
}
genesis := s.cfg.chain.GenesisTime()
genesis := s.cfg.clock.GenesisTime()
cp := s.cfg.chain.FinalizedCheckpt()
finalizedEpoch := cp.Epoch
maxEpoch := slots.EpochsSinceGenesis(genesis)

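These hunks all make the same substitution: wall-clock slot and epoch math reads from the injected clock instead of the chain service. A minimal sketch, assuming genesisTime and validatorsRoot are already known:

    // Sketch: epoch arithmetic sourced entirely from the startup clock.
    clock := startup.NewClock(genesisTime, validatorsRoot)
    currentEpoch := slots.ToEpoch(clock.CurrentSlot())         // epoch by wall clock
    maxEpoch := slots.EpochsSinceGenesis(clock.GenesisTime())  // bound for peer claims
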
@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/prysmaticlabs/prysm/v4/async/abool"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
@@ -17,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -41,6 +43,8 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
root := [32]byte{'C'}

gt := time.Now()
vr := [32]byte{'A'}
r := &Service{
cfg: &config{
p2p: p1,
@@ -53,10 +57,11 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
Epoch: 0,
Root: root[:],
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Genesis: gt,
ValidatorsRoot: vr,
Root: make([]byte, 32),
},
clock: startup.NewClock(gt, vr),
},
rateLimiter: newRateLimiter(p1),
}
@@ -109,6 +114,8 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
var root [32]byte

gt := time.Now()
vr := [32]byte{'A'}
r := &Service{
cfg: &config{
p2p: p1,
@@ -121,10 +128,11 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
Epoch: 0,
Root: params.BeaconConfig().ZeroHash[:],
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Genesis: gt,
ValidatorsRoot: vr,
Root: make([]byte, 32),
},
clock: startup.NewClock(gt, vr),
},
rateLimiter: newRateLimiter(p1),
}
@@ -187,6 +195,8 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
totalSec := int64(params.BeaconConfig().SlotsPerEpoch.Mul(5 * params.BeaconConfig().SecondsPerSlot))
genTime := time.Now().Unix() - totalSec

gt := time.Unix(genTime, 0)
vr := [32]byte{'A'}
r := &Service{
cfg: &config{
p2p: p1,
@@ -198,12 +208,13 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: vr,
Genesis: gt,
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
},
clock: startup.NewClock(gt, vr),
beaconDB: db,
},
rateLimiter: newRateLimiter(p1),
@@ -249,6 +260,10 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
}

func TestHandshakeHandlers_Roundtrip(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()

// Scenario is that p1 and p2 connect, exchange handshakes.
// p2 disconnects and p1 should forget the handshake status.
p1 := p2ptest.NewTestP2P(t)
@@ -271,48 +286,56 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
require.NoError(t, err)
blk := util.NewBeaconBlock()
blk.Block.Slot = 0
util.SaveBlock(t, context.Background(), db, blk)
util.SaveBlock(t, ctx, db, blk)
finalizedRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, finalizedRoot))
chain := &mock.ChainService{
State: st,
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]},
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Root: make([]byte, 32),
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
}
cw := startup.NewClockSynchronizer()
r := &Service{
ctx: ctx,
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: st,
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]},
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Root: make([]byte, 32),
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
},
p2p: p1,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
beaconDB: db,
},
ctx: context.Background(),
rateLimiter: newRateLimiter(p1),
rateLimiter: newRateLimiter(p1),
clockWaiter: cw,
chainStarted: abool.New(),
}
p1.Digest, err = r.currentForkDigest()
require.NoError(t, err)

chain2 := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]},
}
r2 := &Service{
ctx: ctx,
cfg: &config{
chain: &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]},
},
p2p: p2,
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
p2p: p2,
},
rateLimiter: newRateLimiter(p2),
}
p2.Digest, err = r.currentForkDigest()
require.NoError(t, err)

r.Start()
go r.Start()

// Setup streams
pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz_snappy")
@@ -347,13 +370,14 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
out := new(primitives.SSZUint64)
assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out))
assert.Equal(t, uint64(2), uint64(*out))
assert.NoError(t, r2.pingHandler(context.Background(), out, stream))
assert.NoError(t, r2.pingHandler(ctx, out, stream))
assert.NoError(t, stream.Close())
})

numInactive1 := len(p1.Peers().Inactive())
numActive1 := len(p1.Peers().Active())

require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))
p1.Connect(p2)

p1.Peers().Add(new(enr.Record), p2.BHost.ID(), p2.BHost.Addrs()[0], network.DirUnknown)
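The handshake driving this test is the new startup contract: a producer publishes the genesis-derived clock, and consumers block until it lands. A condensed sketch of both halves, with gt and vr standing in for real genesis values:

    // Sketch: the ClockSynchronizer couples a blocking consumer to a producer.
    cw := startup.NewClockSynchronizer()
    go func() {
        // Consumer (a service goroutine): parks until the clock is published
        // or the context is canceled.
        clock, err := cw.WaitForClock(ctx)
        _, _ = clock, err
    }()
    // Producer (normally the blockchain service; here the test):
    require.NoError(t, cw.SetClock(startup.NewClock(gt, vr)))
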
@@ -412,20 +436,22 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
Root: finalizedRoot[:],
}

chain := &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
r := &Service{
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
},
p2p: p1,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
ctx: context.Background(),
rateLimiter: newRateLimiter(p1),
@@ -494,45 +520,48 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
}
totalSec := int64(params.BeaconConfig().SlotsPerEpoch.Mul(5 * params.BeaconConfig().SecondsPerSlot))
genTime := time.Now().Unix() - totalSec
chain := &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
}
r := &Service{
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
},
p2p: p1,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
ctx: context.Background(),
rateLimiter: newRateLimiter(p1),
}

chain2 := &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
}
r2 := &Service{
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
finalizedRoot: true,
},
},
p2p: p1,
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
beaconDB: db,
},
ctx: context.Background(),
@@ -674,48 +703,52 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {

epoch := expectedFinalizedEpoch.Add(2)
totalSec := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch) * params.BeaconConfig().SecondsPerSlot))
genTime := time.Now().Unix() - int64(totalSec)
gt := time.Unix(time.Now().Unix()-int64(totalSec), 0)
vr := [32]byte{'A'}
chain := &mock.ChainService{
State: nState,
FinalizedCheckPoint: remoteFinalizedChkpt,
Root: rHeadRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: gt,
ValidatorsRoot: vr,
FinalizedRoots: map[[32]byte]bool{
tt.expectedFinalizedRoot: true,
tt.remoteFinalizedRoot: true,
},
}
r := &Service{
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: nState,
FinalizedCheckPoint: remoteFinalizedChkpt,
Root: rHeadRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
tt.expectedFinalizedRoot: true,
tt.remoteFinalizedRoot: true,
},
},
p2p: p1,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
ctx: context.Background(),
rateLimiter: newRateLimiter(p1),
}

chain2 := &mock.ChainService{
State: nState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: gt,
ValidatorsRoot: vr,
FinalizedRoots: map[[32]byte]bool{
tt.expectedFinalizedRoot: true,
tt.remoteFinalizedRoot: true,
},
}
r2 := &Service{
cfg: &config{
p2p: p2,
chain: &mock.ChainService{
State: nState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Unix(genTime, 0),
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
tt.expectedFinalizedRoot: true,
tt.remoteFinalizedRoot: true,
},
},
p2p: p2,
chain: chain2,
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
beaconDB: db,
},

@@ -750,6 +783,10 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
}

func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()

p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)

@@ -761,7 +798,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
finalized := util.NewBeaconBlock()
finalizedRoot, err := finalized.Block.HashTreeRoot()
require.NoError(t, err)
genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{})
genesisState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{})
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot))
@@ -769,28 +806,32 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
Epoch: 5,
Root: finalizedRoot[:],
}
chain := &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
cw := startup.NewClockSynchronizer()

r := &Service{
cfg: &config{
p2p: p1,
chain: &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
},
p2p: p1,
chain: chain,
},

ctx: context.Background(),
rateLimiter: newRateLimiter(p1),
ctx: ctx,
rateLimiter: newRateLimiter(p1),
clockWaiter: cw,
chainStarted: abool.New(),
}

r.Start()
go r.Start()

// Setup streams
pcl := protocol.ID("/eth2/beacon_chain/req/status/1/ssz_snappy")
@@ -816,6 +857,8 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
assert.NoError(t, err)
})

require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))

assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
p1.Connect(p2)

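Because Start now blocks until the clock arrives, these tests launch it on a goroutine and release it explicitly. A condensed sketch of the ordering they rely on:

    // Sketch: Start parks in WaitForClock until the test publishes a clock.
    go r.Start()
    // ... install stream handlers and expectations while Start is parked ...
    require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))
    p1.Connect(p2) // the status handshake can now proceed
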
@@ -850,19 +893,21 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
Epoch: 5,
Root: finalizedRoot[:],
}
chain := &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
r := &Service{
cfg: &config{
chain: &mock.ChainService{
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
ctx: context.Background(),
}
@@ -932,12 +977,14 @@ func TestShouldResync(t *testing.T) {
headState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{})
require.NoError(t, err)
require.NoError(t, headState.SetSlot(tt.args.headSlot))
chain := &mock.ChainService{
State: headState,
Genesis: tt.args.genesis,
}
r := &Service{
cfg: &config{
chain: &mock.ChainService{
State: headState,
Genesis: tt.args.genesis,
},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
initialSync: &mockSync.Sync{IsSyncing: tt.args.syncing},
},
ctx: context.Background(),

@@ -19,10 +19,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/async/abool"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
@@ -31,6 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -79,13 +78,13 @@ type config struct {
blsToExecPool blstoexec.PoolManager
chain blockchainService
initialSync Checker
stateNotifier statefeed.Notifier
blockNotifier blockfeed.Notifier
operationNotifier operation.Notifier
executionPayloadReconstructor execution.ExecutionPayloadReconstructor
stateGen *stategen.State
slasherAttestationsFeed *event.Feed
slasherBlockHeadersFeed *event.Feed
clock *startup.Clock
}

// This defines the interface for interacting with block chain service
@@ -139,6 +138,8 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
}

// NewService initializes new regular sync service.
@@ -164,14 +165,14 @@ func NewService(ctx context.Context, opts ...Option) *Service {
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()

go r.registerHandlers()
go r.verifierRoutine()

return r
}

// Start the regular sync service.
func (s *Service) Start() {
go s.verifierRoutine()
go s.registerHandlers()

s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye)
s.cfg.p2p.AddDisconnectionHandler(func(_ context.Context, _ peer.ID) error {
// no-op
@@ -210,7 +211,7 @@ func (s *Service) Stop() error {
func (s *Service) Status() error {
// If our head slot is on a previous epoch and our peers are reporting their head blocks are
// in the most recent epoch, then we might be out of sync.
if headEpoch := slots.ToEpoch(s.cfg.chain.HeadSlot()); headEpoch+1 < slots.ToEpoch(s.cfg.chain.CurrentSlot()) &&
if headEpoch := slots.ToEpoch(s.cfg.chain.HeadSlot()); headEpoch+1 < slots.ToEpoch(s.cfg.clock.CurrentSlot()) &&
headEpoch+1 < s.cfg.p2p.Peers().HighestEpoch() {
return errors.New("out of sync")
}
@@ -232,58 +233,42 @@ func (s *Service) initCaches() {
s.badBlockCache = lruwrpr.New(badBlockSize)
}

func (s *Service) registerHandlers() {
// Wait until chain start.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
switch e.Type {
case statefeed.Initialized:
data, ok := e.Data.(*statefeed.InitializedData)
if !ok {
log.Error("Event feed data is not type *statefeed.InitializedData")
return
}
startTime := data.StartTime
log.WithField("starttime", startTime).Debug("Received state initialized event")
func (s *Service) waitForChainStart() {
clock, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("sync service failed to receive genesis data")
return
}
s.cfg.clock = clock
startTime := clock.GenesisTime()
log.WithField("starttime", startTime).Debug("Received state initialized event")
// Register respective rpc handlers at state initialized event.
s.registerRPCHandlers()
// Wait for chainstart in separate routine.
if startTime.After(prysmTime.Now()) {
time.Sleep(prysmTime.Until(startTime))
}
log.WithField("starttime", startTime).Debug("Chain started in sync service")
s.markForChainStart()
}

// Register respective rpc handlers at state initialized event.
s.registerRPCHandlers()
// Wait for chainstart in separate routine.
go func() {
if startTime.After(prysmTime.Now()) {
time.Sleep(prysmTime.Until(startTime))
}
log.WithField("starttime", startTime).Debug("Chain started in sync service")
s.markForChainStart()
}()
case statefeed.Synced:
_, ok := e.Data.(*statefeed.SyncedData)
if !ok {
log.Error("Event feed data is not type *statefeed.SyncedData")
return
}
// Register respective pubsub handlers at state synced event.
digest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
return
}
currentEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.cfg.chain.GenesisTime().Unix())))
s.registerSubscribers(currentEpoch, digest)
go s.forkWatcher()
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Could not subscribe to state notifier")
func (s *Service) registerHandlers() {
s.waitForChainStart()
select {
case <-s.initialSyncComplete:
// Register respective pubsub handlers at state synced event.
digest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
return
}
currentEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.cfg.clock.GenesisTime().Unix())))
s.registerSubscribers(currentEpoch, digest)
go s.forkWatcher()
return
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
}
}

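registerHandlers now waits on two signals in order: the clock (chain start) and initial-sync completion. The second is a plain channel rather than a Synced event; closing it, not sending on it, is what releases the waiter, so every listener is freed at once. A minimal sketch of the producer side, assuming the service owns the channel:

    // Sketch: whoever drives initial sync closes the channel exactly once.
    s := &Service{initialSyncComplete: make(chan struct{})}
    go s.registerHandlers() // waits for the clock, then for the channel below
    // ... initial sync runs to completion ...
    close(s.initialSyncComplete) // releases registerHandlers
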
@@ -292,6 +277,10 @@ func (s *Service) markForChainStart() {
s.chainStarted.Set()
}

func (s *Service) chainIsStarted() bool {
return s.chainStarted.IsSet()
}

// Checker defines a struct which can verify whether a node is currently
// synchronizing a chain with the rest of peers in the network.
type Checker interface {

@@ -9,9 +9,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/async/abool"
mockChain "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -25,14 +25,16 @@ import (
func TestService_StatusZeroEpoch(t *testing.T) {
bState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 0})
require.NoError(t, err)
chain := &mockChain.ChainService{
Genesis: time.Now(),
State: bState,
}
r := &Service{
cfg: &config{
p2p: p2ptest.NewTestP2P(t),
initialSync: new(mockSync.Sync),
chain: &mockChain.ChainService{
Genesis: time.Now(),
State: bState,
},
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
chainStarted: abool.New(),
}
@@ -47,29 +49,25 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
gs := startup.NewClockSynchronizer()
r := Service{
ctx: context.Background(),
cfg: &config{
p2p: p2p,
chain: chainService,
stateNotifier: chainService.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
p2p: p2p,
chain: chainService,
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
clockWaiter: gs,
}

topic := "/eth2/%x/beacon_block"
go r.registerHandlers()
go r.waitForChainStart()
time.Sleep(100 * time.Millisecond)
i := r.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
},
})
if i == 0 {
t.Fatal("didn't send genesis time to subscribers")
}

var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
b := []byte("sk")
b32 := bytesutil.ToBytes32(b)
sk, err := bls.SecretKeyFromBytes(b32[:])
@@ -90,33 +88,24 @@ func TestSyncHandlers_WaitForChainStart(t *testing.T) {
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
gs := startup.NewClockSynchronizer()
r := Service{
ctx: context.Background(),
cfg: &config{
p2p: p2p,
chain: chainService,
stateNotifier: chainService.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
p2p: p2p,
chain: chainService,
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
clockWaiter: gs,
}

go r.registerHandlers()
time.Sleep(100 * time.Millisecond)
i := r.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now().Add(2 * time.Second),
},
})
if i == 0 {
t.Fatal("didn't send genesis time to subscribers")
}
require.Equal(t, false, r.chainStarted.IsSet(), "Chainstart was marked prematurely")
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
r.waitForChainStart()

// wait for chainstart to be sent
time.Sleep(3 * time.Second)
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
}

@@ -128,18 +117,20 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
gs := startup.NewClockSynchronizer()
r := Service{
ctx: ctx,
cfg: &config{
p2p: p2p,
beaconDB: dbTest.SetupDB(t),
chain: chainService,
stateNotifier: chainService.StateNotifier(),
blockNotifier: chainService.BlockNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
clockWaiter: gs,
initialSyncComplete: make(chan struct{}),
}
r.initCaches()

@@ -148,19 +139,9 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
r.registerHandlers()
syncCompleteCh <- true
}()
for i := 0; i == 0; {
assert.NoError(t, ctx.Err())
i = r.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
},
})
}
for !r.chainStarted.IsSet() {
assert.NoError(t, ctx.Err())
time.Sleep(time.Millisecond)
}
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
r.waitForChainStart()
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")

blockChan := make(chan *feed.Event, 1)
@@ -184,15 +165,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
p2p.ReceivePubSub(topic, msg)
assert.Equal(t, 0, len(blockChan), "block was received by sync service despite not being fully synced")

for i := 0; i == 0; {
assert.NoError(t, ctx.Err())
i = r.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
})
}
close(r.initialSyncComplete)
<-syncCompleteCh

p2p.ReceivePubSub(topic, msg)
@@ -211,30 +184,25 @@ func TestSyncService_StopCleanly(t *testing.T) {
ValidatorsRoot: [32]byte{'A'},
}
ctx, cancel := context.WithCancel(context.Background())
gs := startup.NewClockSynchronizer()
r := Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: p2p,
chain: chainService,
stateNotifier: chainService.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
p2p: p2p,
chain: chainService,
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
clockWaiter: gs,
initialSyncComplete: make(chan struct{}),
}

go r.registerHandlers()
time.Sleep(100 * time.Millisecond)
i := r.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: time.Now(),
},
})
if i == 0 {
t.Fatal("didn't send genesis time to subscribers")
}
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
r.waitForChainStart()

var err error
p2p.Digest, err = r.currentForkDigest()
@@ -244,16 +212,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
time.Sleep(2 * time.Second)
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")

i = r.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Synced,
Data: &statefeed.SyncedData{
StartTime: time.Now(),
},
})
if i == 0 {
t.Fatal("didn't send genesis time to sync event subscribers")
}

close(r.initialSyncComplete)
time.Sleep(1 * time.Second)

require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))

@@ -135,7 +135,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
// subscribe to a given topic with a given validator and subscription handler.
// The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) *pubsub.Subscription {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
// Impossible condition as it would mean digest does not exist.
@@ -301,7 +301,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
// Impossible condition as it would mean digest does not exist.
@@ -315,7 +315,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
}
genesis := s.cfg.chain.GenesisTime()
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
@@ -374,7 +374,7 @@ func (s *Service) subscribeDynamicWithSubnets(
handle subHandler,
digest [4]byte,
) {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
// Impossible condition as it would mean digest does not exist.
@@ -385,7 +385,7 @@ func (s *Service) subscribeDynamicWithSubnets(
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
}
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)
genesis := s.cfg.chain.GenesisTime()
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
@@ -503,7 +503,7 @@ func (s *Service) subscribeSyncSubnet(
// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
panic(err)
@@ -515,7 +515,7 @@ func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrapped
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
}
genesis := s.cfg.chain.GenesisTime()
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
@@ -574,7 +574,7 @@ func (s *Service) subscribeDynamicWithSyncSubnets(
handle subHandler,
digest [4]byte,
) {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
panic(err)
@@ -584,7 +584,7 @@ func (s *Service) subscribeDynamicWithSyncSubnets(
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
}
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().SyncCommitteeSubnetCount)
genesis := s.cfg.chain.GenesisTime()
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
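Each of these subscription loops derives its per-slot ticker from the clock's genesis time, so subnet rotation stays aligned with the injected clock. A minimal sketch, assuming the ticker's usual channel accessor:

    // Sketch: a slot ticker driven by the injected clock's genesis time.
    genesis := s.cfg.clock.GenesisTime()
    ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
    go func() {
        for slot := range ticker.C() { // C() assumed: the ticker's slot channel
            _ = slot // re-evaluate wanted subnets for this slot
        }
    }()
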
@@ -686,7 +686,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
log.WithError(err).Error("Could not compute fork digest")
return pids
}
currSlot := s.cfg.chain.CurrentSlot()
currSlot := s.cfg.clock.CurrentSlot()
wantedSubs := s.retrievePersistentSubs(currSlot)
wantedSubs = slice.SetUint64(append(wantedSubs, s.attesterSubnetIndices(currSlot)...))
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.Attestation{})]
@@ -740,8 +740,8 @@ func (_ *Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uin
}

func (s *Service) currentForkDigest() ([4]byte, error) {
genRoot := s.cfg.chain.GenesisValidatorsRoot()
return forks.CreateForkDigest(s.cfg.chain.GenesisTime(), genRoot[:])
genRoot := s.cfg.clock.GenesisValidatorsRoot()
return forks.CreateForkDigest(s.cfg.clock.GenesisTime(), genRoot[:])
}

// Checks if the provided digest matches up with the current supposed digest.

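With currentForkDigest reading only clock state, any holder of a *startup.Clock can compute the digest without touching the chain service. A minimal sketch:

    // Sketch: deriving the fork digest from clock state alone.
    func digestFromClock(clock *startup.Clock) ([4]byte, error) {
        genRoot := clock.GenesisValidatorsRoot()
        return forks.CreateForkDigest(clock.GenesisTime(), genRoot[:])
    }
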
@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
@@ -39,15 +40,18 @@ import (

func TestSubscribe_ReceivesValidMessage(t *testing.T) {
p2pService := p2ptest.NewTestP2P(t)
gt := time.Now()
vr := [32]byte{'A'}
r := Service{
ctx: context.Background(),
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Now(),
ValidatorsRoot: vr,
Genesis: gt,
},
clock: startup.NewClock(gt, vr),
},
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
@@ -79,15 +83,18 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {

func TestSubscribe_UnsubscribeTopic(t *testing.T) {
p2pService := p2ptest.NewTestP2P(t)
gt := time.Now()
vr := [32]byte{'A'}
r := Service{
ctx: context.Background(),
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Now(),
ValidatorsRoot: vr,
Genesis: gt,
},
clock: startup.NewClock(gt, vr),
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
@@ -123,9 +130,11 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
p2pService := p2ptest.NewTestP2P(t)
ctx := context.Background()
d := db.SetupDB(t)
gt := time.Now()
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Genesis: gt,
ValidatorsRoot: vr,
}
r := Service{
ctx: ctx,
@@ -134,6 +143,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
initialSync: &mockSync.Sync{IsSyncing: false},
slashingPool: slashings.NewPool(),
chain: chainService,
clock: startup.NewClock(gt, vr),
beaconDB: d,
},
seenAttesterSlashingCache: make(map[uint64]bool),
@@ -187,6 +197,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
slashingPool: slashings.NewPool(),
chain: chainService,
beaconDB: d,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
},
seenProposerSlashingCache: lruwrpr.New(10),
chainStarted: abool.New(),
@@ -226,14 +237,16 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {

func TestSubscribe_HandlesPanic(t *testing.T) {
p := p2ptest.NewTestP2P(t)
chain := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
r := Service{
ctx: context.Background(),
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
},
p2p: p,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
p2p: p,
},
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
@@ -261,14 +274,16 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
p := p2ptest.NewTestP2P(t)
hook := logTest.NewGlobal()
chain := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
r := Service{
ctx: context.Background(),
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
},
p2p: p,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
p2p: p,
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
@@ -300,14 +315,16 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
func TestStaticSubnets(t *testing.T) {
p := p2ptest.NewTestP2P(t)
ctx, cancel := context.WithCancel(context.Background())
chain := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
},
p2p: p,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
p2p: p,
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
@@ -427,6 +444,7 @@ func Test_wrapAndReportValidation(t *testing.T) {
chainStarted: chainStarted,
cfg: &config{
chain: mChain,
clock: startup.NewClock(mChain.Genesis, mChain.ValidatorsRoot),
},
subHandler: newSubTopicHandler(),
}
@@ -452,19 +470,28 @@ func TestFilterSubnetPeers(t *testing.T) {
defer flags.Init(new(flags.GlobalFlags))
p := p2ptest.NewTestP2P(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
currSlot := primitives.Slot(100)

gt := time.Now()
genPlus100 := func() time.Time {
return gt.Add(time.Second * time.Duration(uint64(currSlot)*params.BeaconConfig().SecondsPerSlot))
}
chain := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: [32]byte{'A'},
FinalizedRoots: map[[32]byte]bool{
{}: true,
},
}
clock := startup.NewClock(chain.Genesis, chain.ValidatorsRoot, startup.WithNower(genPlus100))
require.Equal(t, currSlot, clock.CurrentSlot())
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Slot: &currSlot,
FinalizedRoots: map[[32]byte]bool{
{}: true,
},
},
p2p: p,
chain: chain,
clock: clock,
p2p: p,
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
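WithNower is what makes the assertion above deterministic: the clock's notion of "now" is swapped for a fixed function, so CurrentSlot no longer races the wall clock. A condensed sketch of the pattern:

    // Sketch: pin "now" exactly 100 slots past genesis.
    gt := time.Now()
    nower := func() time.Time {
        return gt.Add(time.Second * time.Duration(100*params.BeaconConfig().SecondsPerSlot))
    }
    clock := startup.NewClock(gt, [32]byte{'A'}, startup.WithNower(nower))
    // clock.CurrentSlot() == 100 regardless of when the test runs.
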
@@ -509,9 +536,7 @@ func TestFilterSubnetPeers(t *testing.T) {
}

recPeers = r.filterNeededPeers(wantedPeers)
assert.DeepEqual(t, 1, len(recPeers), "expected at least 1 suitable peer to prune")

cancel()
assert.Equal(t, 1, len(recPeers), "expected at least 1 suitable peer to prune")
}

func TestSubscribeWithSyncSubnets_StaticOK(t *testing.T) {
@@ -522,16 +547,16 @@ func TestSubscribeWithSyncSubnets_StaticOK(t *testing.T) {

p := p2ptest.NewTestP2P(t)
ctx, cancel := context.WithCancel(context.Background())
currSlot := primitives.Slot(100)
chain := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Slot: &currSlot,
},
p2p: p,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
p2p: p,
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
@@ -553,23 +578,24 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {

p := p2ptest.NewTestP2P(t)
ctx, cancel := context.WithCancel(context.Background())
currSlot := primitives.Slot(100)
gt := time.Now()
vr := [32]byte{'A'}
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
Slot: &currSlot,
Genesis: gt,
ValidatorsRoot: vr,
},
p2p: p,
p2p: p,
clock: startup.NewClock(gt, vr),
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
// Empty cache at the end of the test.
defer cache.SyncSubnetIDs.EmptyAllCaches()
slot := r.cfg.chain.CurrentSlot()
slot := r.cfg.clock.CurrentSlot()
currEpoch := slots.ToEpoch(slot)
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second)
digest, err := r.currentForkDigest()
@@ -599,22 +625,24 @@ func TestSubscribeWithSyncSubnets_StaticSwitchFork(t *testing.T) {
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(context.Background())
currSlot := primitives.Slot(100)
chain := &mockChain.ChainService{
Genesis: time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second),
ValidatorsRoot: [32]byte{'A'},
Slot: &currSlot,
}
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second),
ValidatorsRoot: [32]byte{'A'},
Slot: &currSlot,
},
p2p: p,
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
p2p: p,
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
// Empty cache at the end of the test.
defer cache.SyncSubnetIDs.EmptyAllCaches()
genRoot := r.cfg.chain.GenesisValidatorsRoot()
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
assert.NoError(t, err)
r.subscribeStaticWithSyncSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest)
@@ -638,15 +666,18 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(context.Background())
currSlot := primitives.Slot(100)
gt := time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
vr := [32]byte{'A'}
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
ValidatorsRoot: [32]byte{'A'},
Genesis: gt,
ValidatorsRoot: vr,
Slot: &currSlot,
},
p2p: p,
clock: startup.NewClock(gt, vr),
p2p: p,
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
@@ -654,7 +685,7 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
// Empty cache at the end of the test.
defer cache.SyncSubnetIDs.EmptyAllCaches()
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), 0, []uint64{0, 1}, 10*time.Second)
genRoot := r.cfg.chain.GenesisValidatorsRoot()
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
assert.NoError(t, err)


@@ -20,6 +20,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
@@ -67,6 +68,7 @@ func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) {
p2p: p,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
},
@@ -150,6 +152,7 @@ func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) {
chain: chainService,
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
},
seenBlockCache: lruwrpr.New(10),
badBlockCache: lruwrpr.New(10),
@@ -229,6 +232,7 @@ func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) {
p2p: p,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
},

@@ -80,7 +80,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
// processing tolerance.
if err := helpers.ValidateAttestationTime(
m.Message.Aggregate.Data.Slot,
s.cfg.chain.GenesisTime(),
s.cfg.clock.GenesisTime(),
earlyAttestationProcessingTolerance,
); err != nil {
tracing.AnnotateError(span, err)

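Gossip validation now bounds attestation slots against clock-derived genesis time. A condensed sketch, assuming an aggregate message m and the span from the surrounding validator:

    // Sketch: timing check sourced from the injected clock, not the chain service.
    if err := helpers.ValidateAttestationTime(
        m.Message.Aggregate.Data.Slot,
        s.cfg.clock.GenesisTime(), // genesis comes from the clock
        earlyAttestationProcessingTolerance,
    ); err != nil {
        tracing.AnnotateError(span, err)
    }
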
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
@@ -355,21 +356,23 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) {
 	require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	chain := &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()),
+		Optimistic:       true,
+		DB:               db,
+		State:            beaconState,
+		ValidAttestation: true,
+		FinalizedCheckPoint: &ethpb.Checkpoint{
+			Epoch: 0,
+			Root:  att.Data.BeaconBlockRoot,
+		}}
 	r := &Service{
 		ctx: ctx,
 		cfg: &config{
 			p2p:         p,
 			beaconDB:    db,
 			initialSync: &mockSync.Sync{IsSyncing: false},
-			chain: &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()),
-				Optimistic:       true,
-				DB:               db,
-				State:            beaconState,
-				ValidAttestation: true,
-				FinalizedCheckPoint: &ethpb.Checkpoint{
-					Epoch: 0,
-					Root:  att.Data.BeaconBlockRoot,
-				}},
+			chain:       chain,
+			clock:       startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 			attPool:             attestations.NewPool(),
 			attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
 		},

@@ -456,22 +459,23 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
 
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
+	chain := &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()),
+		DB:               db,
+		ValidatorsRoot:   [32]byte{'A'},
+		State:            beaconState,
+		ValidAttestation: true,
+		FinalizedCheckPoint: &ethpb.Checkpoint{
+			Epoch: 0,
+			Root:  signedAggregateAndProof.Message.Aggregate.Data.BeaconBlockRoot,
+		}}
 	r := &Service{
 		ctx: ctx,
 		cfg: &config{
 			p2p:         p,
 			beaconDB:    db,
 			initialSync: &mockSync.Sync{IsSyncing: false},
-			chain: &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()),
-				DB:               db,
-				ValidatorsRoot:   [32]byte{'A'},
-				State:            beaconState,
-				ValidAttestation: true,
-				FinalizedCheckPoint: &ethpb.Checkpoint{
-					Epoch: 0,
-					Root:  signedAggregateAndProof.Message.Aggregate.Data.BeaconBlockRoot,
-				}},
+			chain:       chain,
+			clock:       startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 			attPool:             attestations.NewPool(),
 			attestationNotifier: (&mock.ChainService{}).OperationNotifier(),
 		},

@@ -14,6 +14,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
 	p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
 	mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
 	"github.com/prysmaticlabs/prysm/v4/config/params"

@@ -80,10 +81,12 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
 	slashing, s := setupValidAttesterSlashing(t)
 
+	chain := &mock.ChainService{State: s, Genesis: time.Now()}
 	r := &Service{
 		cfg: &config{
 			p2p:         p,
-			chain:       &mock.ChainService{State: s, Genesis: time.Now()},
+			chain:       chain,
+			clock:       startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 			initialSync: &mockSync.Sync{IsSyncing: false},
 		},
 		seenAttesterSlashingCache: make(map[uint64]bool),

@@ -123,10 +126,12 @@ func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T
 	require.NoError(t, s.SetValidators(vals))
 
+	chain := &mock.ChainService{State: s, Genesis: time.Now()}
 	r := &Service{
 		cfg: &config{
 			p2p:         p,
-			chain:       &mock.ChainService{State: s, Genesis: time.Now()},
+			chain:       chain,
+			clock:       startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 			initialSync: &mockSync.Sync{IsSyncing: false},
 		},
 		seenAttesterSlashingCache: make(map[uint64]bool),

@@ -171,11 +176,13 @@ func TestValidateAttesterSlashing_CanFilter(t *testing.T) {
 	p := p2ptest.NewTestP2P(t)
 	ctx := context.Background()
 
+	chain := &mock.ChainService{Genesis: time.Now()}
 	r := &Service{
 		cfg: &config{
 			p2p:         p,
 			initialSync: &mockSync.Sync{IsSyncing: false},
-			chain:       &mock.ChainService{Genesis: time.Now()},
+			chain:       chain,
+			clock:       startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 		},
 		seenAttesterSlashingCache: make(map[uint64]bool),
 		subHandler:                newSubTopicHandler(),

@@ -240,10 +247,12 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
 	defer cancel()
 
+	chain := &mock.ChainService{State: s}
 	r := &Service{
 		cfg: &config{
 			p2p:         p,
-			chain:       &mock.ChainService{State: s},
+			chain:       chain,
+			clock:       startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 			initialSync: &mockSync.Sync{IsSyncing: false},
 		},
 		seenAttesterSlashingCache: make(map[uint64]bool),

@@ -78,7 +78,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
 
 	// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
 	// processing tolerance.
-	if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.chain.GenesisTime(),
+	if err := helpers.ValidateAttestationTime(att.Data.Slot, s.cfg.clock.GenesisTime(),
 		earlyAttestationProcessingTolerance); err != nil {
 		tracing.AnnotateError(span, err)
 		return pubsub.ValidationIgnore, err

@@ -15,6 +15,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
 	dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
 	p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
 	mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
 	lruwrpr "github.com/prysmaticlabs/prysm/v4/cache/lru"
 	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"

@@ -46,6 +47,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 			p2p:      p,
 			beaconDB: db,
 			chain:    chain,
+			clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
 			attestationNotifier: (&mockChain.ChainService{}).OperationNotifier(),
 		},
 		blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),

@@ -305,11 +307,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 }
 
 func TestService_setSeenCommitteeIndicesSlot(t *testing.T) {
-	chainService := &mockChain.ChainService{
-		Genesis:        time.Now(),
-		ValidatorsRoot: [32]byte{'A'},
-	}
-	s := NewService(context.Background(), WithP2P(p2ptest.NewTestP2P(t)), WithStateNotifier(chainService.StateNotifier()))
+	s := NewService(context.Background(), WithP2P(p2ptest.NewTestP2P(t)))
 	s.initCaches()
 
 	// Empty cache

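The dropped `WithStateNotifier` option is the point of the commit title: sync no longer subscribes to a state-feed `Initialize` event to learn genesis, it blocks on a startup handoff instead. A hedged sketch of that channel-based shape; the names here are invented for illustration and stand in for the startup package's setter/waiter pair around `*startup.Clock`.

    // Illustrative handoff replacing the state-feed event; clockSync and
    // its methods are invented names, not prysm's exact API.
    package sync

    import (
    	"context"

    	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
    )

    type clockSync struct{ ch chan *startup.Clock }

    func newClockSync() *clockSync { return &clockSync{ch: make(chan *startup.Clock, 1)} }

    // SetClock is called once, when genesis is known.
    func (c *clockSync) SetClock(clk *startup.Clock) { c.ch <- clk }

    // WaitForClock blocks until genesis is known or startup is canceled.
    func (c *clockSync) WaitForClock(ctx context.Context) (*startup.Clock, error) {
    	select {
    	case clk := <-c.ch:
    		return clk, nil
    	case <-ctx.Done():
    		return nil, ctx.Err()
    	}
    }
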
@@ -122,7 +122,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
 	// Be lenient in handling early blocks. Instead of discarding blocks arriving later than
 	// MAXIMUM_GOSSIP_CLOCK_DISPARITY in future, we tolerate blocks arriving at max two slots
 	// earlier (SECONDS_PER_SLOT * 2 seconds). Queue such blocks and process them at the right slot.
-	genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
+	genesisTime := uint64(s.cfg.clock.GenesisTime().Unix())
 	if err := slots.VerifyTime(genesisTime, blk.Block().Slot(), earlyBlockProcessingTolerance); err != nil {
 		log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block: could not verify slot time")
 		return pubsub.ValidationIgnore, nil

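The two-slot leniency in the comment is concrete arithmetic: with 12-second slots, a block may arrive up to 24 seconds before its slot begins. A hedged, self-contained restatement of the guard follows; it is not `slots.VerifyTime` itself, and the slot length is an assumption.

    package main

    import (
    	"fmt"
    	"time"
    )

    const secondsPerSlot = 12 // mainnet value, assumed here

    // slotStart is the wall-clock time at which a slot begins.
    func slotStart(genesis time.Time, slot uint64) time.Time {
    	return genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
    }

    // tooEarly mirrors the guard above: a block is ignored when its slot
    // starts more than `tolerance` in the future (here, two slots).
    func tooEarly(genesis time.Time, slot uint64, tolerance time.Duration) bool {
    	return time.Until(slotStart(genesis, slot)) > tolerance
    }

    func main() {
    	genesis := time.Now().Add(-10 * secondsPerSlot * time.Second) // slot 10 just began
    	tol := 2 * secondsPerSlot * time.Second
    	fmt.Println(tooEarly(genesis, 11, tol)) // false: 12s early, inside the 24s tolerance
    	fmt.Println(tooEarly(genesis, 13, tol)) // true: 36s early
    }
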
@@ -156,7 +156,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
 			return pubsub.ValidationIgnore, err
 		}
 		s.pendingQueueLock.Unlock()
-		err := fmt.Errorf("early block, with current slot %d < block slot %d", s.cfg.chain.CurrentSlot(), blk.Block().Slot())
+		err := fmt.Errorf("early block, with current slot %d < block slot %d", s.cfg.clock.CurrentSlot(), blk.Block().Slot())
 		log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not process early block")
 		return pubsub.ValidationIgnore, err
 	}