refactor subscription code to enable peer discovery asap (#15660)
* refactor subscription code to enable peer discovery asap
* fix subscription tests
* hunting down the other test initialization bugs
* refactor subscription parameter tracking type
* manu naming feedback
* manu naming feedback
* missing godoc
* protect tracker from nil subscriptions

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
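The heart of the change is an injectable spawner on the sync Service: in production, spawn falls back to `go f()`, while tests install a hook that wraps every spawned subscription routine in a sync.WaitGroup so they can deterministically wait for registration to finish. Below is a minimal, standalone sketch of that pattern; the type and function names here are shortened for illustration, and the real wiring is Service.subscriptionSpawner, Service.spawn, and the test helper attachSpawner shown in the diff that follows.

package main

import (
	"fmt"
	"sync"
)

// service mirrors the spawner hook introduced by this commit: when no spawner
// is installed, work runs on a plain goroutine; tests can inject one to be
// notified when every spawned routine has returned.
type service struct {
	spawner func(func())
}

func (s *service) spawn(f func()) {
	if s.spawner != nil {
		s.spawner(f)
		return
	}
	go f()
}

// attachWaitGroup installs a spawner backed by a WaitGroup, so a test can
// block until all spawned subscription work completes.
func attachWaitGroup(s *service) *sync.WaitGroup {
	wg := new(sync.WaitGroup)
	s.spawner = func(f func()) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f()
		}()
	}
	return wg
}

func main() {
	s := &service{}
	wg := attachWaitGroup(s)
	s.spawn(func() { fmt.Println("subscribed") })
	wg.Wait() // deterministic: the spawned routine has finished before we proceed
}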
@@ -50,6 +50,7 @@ const (

// TestP2P represents a p2p implementation that can be used for testing.
type TestP2P struct {
mu sync.Mutex
t *testing.T
BHost host.Host
EnodeID enode.ID

@@ -243,6 +244,8 @@ func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler)

// JoinTopic will join PubSub topic, if not already joined.
func (p *TestP2P) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) {
p.mu.Lock()
defer p.mu.Unlock()
if _, ok := p.joinedTopics[topic]; !ok {
joinedTopic, err := p.pubsub.Join(topic, opts...)
if err != nil {
@@ -67,6 +67,13 @@ func WithNower(n Nower) ClockOpt {
}
}

// WithTimeAsNow will create a Nower based on the given time.Time and set it as the Now() implementation.
func WithTimeAsNow(t time.Time) ClockOpt {
return func(g *Clock) {
g.now = func() time.Time { return t }
}
}

// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.
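For reference, the new option makes the service clock fully deterministic in tests. A hedged usage sketch follows; the genesis timestamp and the 12-second-slot arithmetic are illustrative assumptions, not values taken from the test files.

package main

import (
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
)

func main() {
	// Illustrative values only; the tests in this commit derive genesis data
	// from the embedded genesis package and params.EpochsDuration.
	genesisTime := time.Now().Add(-4 * 32 * 12 * time.Second) // pretend genesis was ~4 mainnet epochs ago
	var validatorsRoot [32]byte
	pinned := time.Now()
	clock := startup.NewClock(genesisTime, validatorsRoot, startup.WithTimeAsNow(pinned))
	fmt.Println(clock.CurrentSlot()) // computed from the pinned time, not the wall clock
}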
@@ -265,6 +265,7 @@ go_test(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

@@ -106,6 +106,9 @@ func (s *Service) custodyGroupCount() (uint64, error) {
// validatorsCustodyRequirements computes the custody requirements based on the
// finalized state and the tracked validators.
func (s *Service) validatorsCustodyRequirement() (uint64, error) {
if s.trackedValidatorsCache == nil {
return 0, nil
}
// Get the indices of the tracked validators.
indices := s.trackedValidatorsCache.Indices()

@@ -12,6 +12,7 @@ import (
// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
<-s.initialSyncComplete
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
@@ -2,96 +2,84 @@ package sync

import (
"context"
"fmt"
"sync"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/async/abool"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func defaultClockWithTimeAtEpoch(epoch primitives.Epoch) *startup.Clock {
now := genesis.Time().Add(params.EpochsDuration(epoch, params.BeaconConfig()))
return startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithTimeAsNow(now))
}

func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
closedChan := make(chan struct{})
close(closedChan)
peer2peer := p2ptest.NewTestP2P(t)
chainService := &mockChain.ChainService{
Genesis: genesis.Time(),
ValidatorsRoot: genesis.ValidatorsRoot(),
}
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Millisecond)
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: defaultClockWithTimeAtEpoch(current),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
initialSyncComplete: closedChan,
}
return r
}

func TestService_CheckForNextEpochFork(t *testing.T) {
closedChan := make(chan struct{})
close(closedChan)
params.SetupTestConfigCleanup(t)
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 1096*2
params.BeaconConfig().InitializeForkSchedule()

tests := []struct {
name string
svcCreator func(t *testing.T) *Service
currEpoch primitives.Epoch
wantErr bool
postSvcCheck func(t *testing.T, s *Service)
name string
svcCreator func(t *testing.T) *Service
checkRegistration func(t *testing.T, s *Service)
forkEpoch primitives.Epoch
epochAtRegistration func(primitives.Epoch) primitives.Epoch
nextForkEpoch primitives.Epoch
}{
{
name: "no fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
gt := time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second)
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
}
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(gt, vr),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
return r
},
currEpoch: 10,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {

},
name: "no fork in the next epoch",
forkEpoch: params.BeaconConfig().AltairForkEpoch,
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 2 },
nextForkEpoch: params.BeaconConfig().BellatrixForkEpoch,
checkRegistration: func(t *testing.T, s *Service) {},
},
{
name: "altair fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
gt := time.Now().Add(-4 * oneEpoch())
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
}
bCfg := params.BeaconConfig().Copy()
bCfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(gt, vr),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
name: "altair fork in the next epoch",
forkEpoch: params.BeaconConfig().AltairForkEpoch,
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
nextForkEpoch: params.BeaconConfig().BellatrixForkEpoch,
checkRegistration: func(t *testing.T, s *Service) {
digest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
rpcMap[string(p)] = true
@@ -99,375 +87,132 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
expected := fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
// TODO: we should check subcommittee indices here but we need to work with the committee cache to do it properly
/*
subIndices := mapFromCount(params.BeaconConfig().SyncCommitteeSubnetCount)
for idx := range subIndices {
topic := fmt.Sprintf(p2p.SyncCommitteeSubnetTopicFormat, digest, idx)
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
}
*/
},
},
{
name: "bellatrix fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
chainService := &mockChain.ChainService{
Genesis: time.Now().Add(-4 * oneEpoch()),
ValidatorsRoot: [32]byte{'A'},
}
bCfg := params.BeaconConfig().Copy()
bCfg.AltairForkEpoch = 3
bCfg.BellatrixForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
name: "capella fork in the next epoch",
checkRegistration: func(t *testing.T, s *Service) {
digest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
rpcMap[string(p)] = true
}

expected := fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
},
forkEpoch: params.BeaconConfig().CapellaForkEpoch,
nextForkEpoch: params.BeaconConfig().DenebForkEpoch,
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
},
{
name: "deneb fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
gt := time.Now().Add(-4 * oneEpoch())
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
}
bCfg := params.BeaconConfig().Copy()
bCfg.DenebForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(gt, vr),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
checkRegistration: func(t *testing.T, s *Service) {
digest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
rpcMap[string(p)] = true
}
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
for idx := range subIndices {
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
}
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
},
forkEpoch: params.BeaconConfig().DenebForkEpoch,
nextForkEpoch: params.BeaconConfig().ElectraForkEpoch,
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
},
{
name: "electra fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
gt := time.Now().Add(-4 * oneEpoch())
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
checkRegistration: func(t *testing.T, s *Service) {
digest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
for idx := range subIndices {
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
}
bCfg := params.BeaconConfig().Copy()
bCfg.ElectraForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(gt, vr),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
rpcMap[string(p)] = true
}
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
},
forkEpoch: params.BeaconConfig().ElectraForkEpoch,
nextForkEpoch: params.BeaconConfig().FuluForkEpoch,
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
},
{
name: "fulu fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
gt := time.Now().Add(-4 * oneEpoch())
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
}
bCfg := params.BeaconConfig().Copy()
bCfg.FuluForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(gt, vr),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}
return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
checkRegistration: func(t *testing.T, s *Service) {
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
rpcMap[string(p)] = true
}
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV3+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
},
forkEpoch: params.BeaconConfig().FuluForkEpoch,
nextForkEpoch: params.BeaconConfig().FuluForkEpoch,
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcCreator(t)
if err := s.registerForUpcomingFork(tt.currEpoch); (err != nil) != tt.wantErr {
t.Errorf("registerForUpcomingFork() error = %v, wantErr %v", err, tt.wantErr)
current := tt.epochAtRegistration(tt.forkEpoch)
s := testForkWatcherService(t, current)
wg := attachSpawner(s)
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
wg.Wait()
tt.checkRegistration(t, s)

if current != tt.forkEpoch-1 {
return
}
tt.postSvcCheck(t, s)

// Ensure the topics were registered for the upcoming fork
digest := params.ForkDigest(tt.forkEpoch)
assert.Equal(t, true, s.subHandler.digestExists(digest))

// After this point we are checking deregistration, which doesn't apply if there isn't a higher
// nextForkEpoch.
if tt.forkEpoch >= tt.nextForkEpoch {
return
}

nextDigest := params.ForkDigest(tt.nextForkEpoch)
// Move the clock to just before the next fork epoch and ensure deregistration is correct
wg = attachSpawner(s)
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
wg.Wait()
// deregister as if it is the epoch after the next fork epoch
require.NoError(t, s.deregisterFromPastFork(tt.nextForkEpoch+1))
assert.Equal(t, false, s.subHandler.digestExists(digest))
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
})
}
}

func TestService_CheckForPreviousEpochFork(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
svcCreator func(t *testing.T) *Service
currEpoch primitives.Epoch
wantErr bool
postSvcCheck func(t *testing.T, s *Service)
}{
{
name: "no fork in the previous epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
chainService := &mockChain.ChainService{
Genesis: time.Now().Add(-oneEpoch()),
ValidatorsRoot: [32]byte{'A'},
}
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: clock,
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
err := r.registerRPCHandlers()
assert.NoError(t, err)
return r
},
currEpoch: 10,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
ptcls := s.cfg.p2p.Host().Mux().Protocols()
pMap := make(map[string]bool)
for _, p := range ptcls {
pMap[string(p)] = true
}
assert.Equal(t, true, pMap[p2p.RPCGoodByeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCStatusTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCPingTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCMetaDataTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCBlocksByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCBlocksByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
},
},
{
name: "altair fork in the previous epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
chainService := &mockChain.ChainService{
Genesis: time.Now().Add(-4 * oneEpoch()),
ValidatorsRoot: [32]byte{'A'},
}
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
bCfg := params.BeaconConfig().Copy()
bCfg.AltairForkEpoch = 3
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: clock,
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
prevGenesis := chainService.Genesis
// To allow registration of v1 handlers
chainService.Genesis = time.Now().Add(-1 * oneEpoch())
err := r.registerRPCHandlers()
assert.NoError(t, err)

chainService.Genesis = prevGenesis
previous, err := r.rpcHandlerByTopicFromFork(version.Phase0)
assert.NoError(t, err)

next, err := r.rpcHandlerByTopicFromFork(version.Altair)
assert.NoError(t, err)

handlerByTopic := addedRPCHandlerByTopic(previous, next)

for topic, handler := range handlerByTopic {
r.registerRPC(topic, handler)
}

digest := params.ForkDigest(0)
assert.NoError(t, err)
r.registerSubscribers(0, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

digest = params.ForkDigest(3)
r.registerSubscribers(3, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(0)
assert.Equal(t, false, s.subHandler.digestExists(digest))
digest = params.ForkDigest(3)
assert.Equal(t, true, s.subHandler.digestExists(digest))

ptcls := s.cfg.p2p.Host().Mux().Protocols()
pMap := make(map[string]bool)
for _, p := range ptcls {
pMap[string(p)] = true
}
assert.Equal(t, true, pMap[p2p.RPCGoodByeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCStatusTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCPingTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCMetaDataTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, true, pMap[p2p.RPCBlocksByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()])

assert.Equal(t, false, pMap[p2p.RPCMetaDataTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, false, pMap[p2p.RPCBlocksByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
assert.Equal(t, false, pMap[p2p.RPCBlocksByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
},
},
{
name: "bellatrix fork in the previous epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
chainService := &mockChain.ChainService{
Genesis: time.Now().Add(-4 * oneEpoch()),
ValidatorsRoot: [32]byte{'A'},
}
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
bCfg := params.BeaconConfig().Copy()
bCfg.AltairForkEpoch = 1
bCfg.BellatrixForkEpoch = 3
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(t.Context())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: clock,
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
digest := params.ForkDigest(1)
r.registerSubscribers(1, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

digest = params.ForkDigest(3)
r.registerSubscribers(3, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
digest := params.ForkDigest(1)
assert.Equal(t, false, s.subHandler.digestExists(digest))
digest = params.ForkDigest(3)
assert.Equal(t, true, s.subHandler.digestExists(digest))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcCreator(t)
if err := s.deregisterFromPastFork(tt.currEpoch); (err != nil) != tt.wantErr {
t.Errorf("registerForUpcomingFork() error = %v, wantErr %v", err, tt.wantErr)
}
tt.postSvcCheck(t, s)
})
func attachSpawner(s *Service) *sync.WaitGroup {
wg := new(sync.WaitGroup)
s.subscriptionSpawner = func(f func()) {
wg.Add(1)
go func() {
defer wg.Done()
f()
}()
}
return wg
}

// oneEpoch returns the duration of one epoch.
@@ -316,11 +316,14 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
beaconDB: db,
stateNotifier: chain.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
},
rateLimiter: newRateLimiter(p1),
clockWaiter: cw,
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
markInitSyncComplete(t, r)
clock := startup.NewClockSynchronizer()
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
r.verifierWaiter = verification.NewInitializerWaiter(clock, chain.ForkChoiceStore, r.cfg.stateGen)

@@ -337,9 +340,12 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
clock: startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
p2p: p2,
stateNotifier: chain.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
},
rateLimiter: newRateLimiter(p2),
subHandler: newSubTopicHandler(),
}
markInitSyncComplete(t, r2)
clock = startup.NewClockSynchronizer()
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
r2.verifierWaiter = verification.NewInitializerWaiter(clock, chain2.ForkChoiceStore, r2.cfg.stateGen)

@@ -909,13 +915,16 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
p2p: p1,
chain: chain,
stateNotifier: chain.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
},

ctx: ctx,
rateLimiter: newRateLimiter(p1),
clockWaiter: cw,
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
markInitSyncComplete(t, r)
clock := startup.NewClockSynchronizer()
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
r.verifierWaiter = verification.NewInitializerWaiter(clock, chain.ForkChoiceStore, r.cfg.stateGen)
@@ -178,6 +178,7 @@ type Service struct {
lcStore *lightClient.Store
dataColumnLogCh chan dataColumnLogEntry
registeredNetworkEntry params.NetworkScheduleEntry
subscriptionSpawner func(func()) // see Service.spawn for details
}

// NewService initializes new regular sync service.

@@ -254,7 +255,7 @@ func (s *Service) Start() {
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

go s.verifierRoutine()
go s.startTasksPostInitialSync()
go s.startDiscoveryAndSubscriptions()
go s.processDataColumnLogs()

s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye)

@@ -384,32 +385,31 @@ func (s *Service) waitForChainStart() {
s.markForChainStart()
}

func (s *Service) startTasksPostInitialSync() {
func (s *Service) startDiscoveryAndSubscriptions() {
// Wait for the chain to start.
s.waitForChainStart()

select {
case <-s.initialSyncComplete:
// Compute the current epoch.
currentSlot := slots.CurrentSlot(s.cfg.clock.GenesisTime())
currentEpoch := slots.ToEpoch(currentSlot)

// Compute the current fork forkDigest.
forkDigest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
return
}

// Register respective pubsub handlers at state synced event.
s.registerSubscribers(currentEpoch, forkDigest)

// Start the fork watcher.
go s.forkWatcher()

case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
if s.ctx.Err() != nil {
log.Debug("Context closed, exiting StartDiscoveryAndSubscription")
return
}

// Compute the current epoch.
currentSlot := slots.CurrentSlot(s.cfg.clock.GenesisTime())
currentEpoch := slots.ToEpoch(currentSlot)

// Compute the current fork forkDigest.
forkDigest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
return
}

// Register respective pubsub handlers at state synced event.
s.registerSubscribers(currentEpoch, forkDigest)

// Start the fork watcher.
go s.forkWatcher()
}

func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
@@ -69,7 +69,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
}

topic := "/eth2/%x/beacon_block"
go r.startTasksPostInitialSync()
go r.startDiscoveryAndSubscriptions()
time.Sleep(100 * time.Millisecond)

var vr [32]byte

@@ -150,7 +150,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {

syncCompleteCh := make(chan bool)
go func() {
r.startTasksPostInitialSync()
r.startDiscoveryAndSubscriptions()
syncCompleteCh <- true
}()

@@ -206,8 +206,9 @@ func TestSyncService_StopCleanly(t *testing.T) {
clockWaiter: gs,
initialSyncComplete: make(chan struct{}),
}
markInitSyncComplete(t, &r)

go r.startTasksPostInitialSync()
go r.startDiscoveryAndSubscriptions()
var vr [32]byte
require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
r.waitForChainStart()

@@ -220,9 +221,6 @@ func TestSyncService_StopCleanly(t *testing.T) {
time.Sleep(2 * time.Second)
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")

close(r.initialSyncComplete)
time.Sleep(1 * time.Second)

require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
@@ -6,6 +6,7 @@ import (
"reflect"
"runtime/debug"
"strings"
"sync"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"

@@ -35,40 +36,114 @@ import (

const pubsubMessageTimeout = 30 * time.Second

type (
// wrappedVal represents a gossip validator which also returns an error along with the result.
wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)

// subHandler represents handler for a given subscription.
subHandler func(context.Context, proto.Message) error

// parameters used for the `subscribeWithParameters` function.
subscribeParameters struct {
topicFormat string
validate wrappedVal
handle subHandler
digest [4]byte

// getSubnetsToJoin is a function that returns all subnets the node should join.
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool

// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
// but for which no subscriptions are needed.
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
}

subscribeToSubnetsParameters struct {
subscriptionBySubnet map[uint64]*pubsub.Subscription
topicFormat string
digest [4]byte
validate wrappedVal
handle subHandler
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
}
)

var errInvalidDigest = errors.New("invalid digest")

// wrappedVal represents a gossip validator which also returns an error along with the result.
type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)

// subHandler represents handler for a given subscription.
type subHandler func(context.Context, proto.Message) error

// subscribeParameters holds the parameters that are needed to construct a set of subscriptions topics for a given
// set of gossipsub subnets.
type subscribeParameters struct {
topicFormat string
validate wrappedVal
handle subHandler
digest [4]byte
// getSubnetsToJoin is a function that returns all subnets the node should join.
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
// but for which no subscriptions are needed.
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
}

// shortTopic is a less verbose version of topic strings used for logging.
func (p subscribeParameters) shortTopic() string {
short := p.topicFormat
fmtLen := len(short)
if fmtLen >= 3 && short[fmtLen-3:] == "_%d" {
short = short[:fmtLen-3]
}
return fmt.Sprintf(short, p.digest)
}

func (p subscribeParameters) logFields() logrus.Fields {
return logrus.Fields{
"topic": p.shortTopic(),
}
}

// fullTopic is the fully qualified topic string, given to gossipsub.
func (p subscribeParameters) fullTopic(subnet uint64, suffix string) string {
return fmt.Sprintf(p.topicFormat, p.digest, subnet) + suffix
}
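For orientation, the two helpers above turn a subnet topic format plus a fork digest into a log-friendly string and a gossipsub-ready string respectively. A small hedged sketch of the same string handling, using an assumed attestation-style format and an assumed encoding suffix (the real constants live in the p2p package):

package main

import "fmt"

func main() {
	// Assumed format, mirroring the "/eth2/%x/..._%d" shape used by subnet topics.
	topicFormat := "/eth2/%x/beacon_attestation_%d"
	digest := [4]byte{0x01, 0x02, 0x03, 0x04}

	// shortTopic: trim the trailing "_%d" so logs name the topic family, not one subnet.
	short := topicFormat
	if len(short) >= 3 && short[len(short)-3:] == "_%d" {
		short = short[:len(short)-3]
	}
	fmt.Printf(short+"\n", digest) // /eth2/01020304/beacon_attestation

	// fullTopic: fill in a concrete subnet and append the encoding suffix.
	suffix := "/ssz_snappy" // assumed protocol suffix
	fmt.Println(fmt.Sprintf(topicFormat, digest, 7) + suffix)
}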

// subnetTracker keeps track of which subnets we are subscribed to, out of the set of
// possible subnets described by a `subscribeParameters`.
type subnetTracker struct {
subscribeParameters
mu sync.RWMutex
subscriptions map[uint64]*pubsub.Subscription
}

func newSubnetTracker(p subscribeParameters) *subnetTracker {
return &subnetTracker{
subscribeParameters: p,
subscriptions: make(map[uint64]*pubsub.Subscription),
}
}

// unwanted takes a list of wanted subnets and returns a list of currently subscribed subnets that are not included.
func (t *subnetTracker) unwanted(wanted map[uint64]bool) []uint64 {
t.mu.RLock()
defer t.mu.RUnlock()
unwanted := make([]uint64, 0, len(t.subscriptions))
for subnet := range t.subscriptions {
if wanted == nil || !wanted[subnet] {
unwanted = append(unwanted, subnet)
}
}
return unwanted
}

// missing takes a list of wanted subnets and returns a list of wanted subnets that are not currently tracked.
func (t *subnetTracker) missing(wanted map[uint64]bool) []uint64 {
t.mu.RLock()
defer t.mu.RUnlock()
missing := make([]uint64, 0, len(wanted))
for subnet := range wanted {
if _, ok := t.subscriptions[subnet]; !ok {
missing = append(missing, subnet)
}
}
return missing
}

// cancelSubscription cancels and removes the subscription for a given subnet.
func (t *subnetTracker) cancelSubscription(subnet uint64) {
t.mu.Lock()
defer t.mu.Unlock()
defer delete(t.subscriptions, subnet)

sub := t.subscriptions[subnet]
if sub == nil {
return
}
sub.Cancel()
}

// track asks subscriptionTracker to hold on to the subscription for a given subnet so
// that we can remember that it is tracked and cancel its context when it's time to unsubscribe.
func (t *subnetTracker) track(subnet uint64, sub *pubsub.Subscription) {
if sub == nil {
return
}
t.mu.Lock()
defer t.mu.Unlock()
t.subscriptions[subnet] = sub
}
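Taken together, the tracker supports a simple per-slot reconcile loop: compute the wanted subnet set for the current slot, cancel what is unwanted, then subscribe to what is missing. A hedged, self-contained sketch of that loop shape with a stripped-down stand-in for the tracker (the real code routes the two comments through cancelSubscription/unSubscribeFromTopic and track/subscribeWithBase):

package main

import "fmt"

// miniTracker is a stand-in for subnetTracker, just enough to show the shape:
// unwanted() picks tracked subnets no longer wanted, missing() picks wanted
// subnets not yet tracked.
type miniTracker struct {
	tracked map[uint64]bool
}

func (t *miniTracker) unwanted(wanted map[uint64]bool) []uint64 {
	out := []uint64{}
	for s := range t.tracked {
		if wanted == nil || !wanted[s] {
			out = append(out, s)
		}
	}
	return out
}

func (t *miniTracker) missing(wanted map[uint64]bool) []uint64 {
	out := []uint64{}
	for s := range wanted {
		if !t.tracked[s] {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	t := &miniTracker{tracked: map[uint64]bool{1: true, 2: true}}
	wanted := map[uint64]bool{2: true, 3: true} // e.g. computed from the current slot

	for _, s := range t.unwanted(wanted) {
		delete(t.tracked, s) // real code: cancelSubscription + unSubscribeFromTopic
		fmt.Println("unsubscribed from subnet", s)
	}
	for _, s := range t.missing(wanted) {
		t.tracked[s] = true // real code: track(subnet, subscribeWithBase(...))
		fmt.Println("subscribed to subnet", s)
	}
}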

// noopValidator is a no-op that only decodes the message, but does not check its contents.
func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
m, err := s.decodePubsubMessage(msg)

@@ -112,132 +187,146 @@ func (s *Service) activeSyncSubnetIndices(currentSlot primitives.Slot) map[uint6
return mapFromSlice(subscriptions)
}

// spawn allows the Service to use a custom function for launching goroutines.
// This is useful in tests where we can set spawner to a sync.WaitGroup and
// wait for the spawned goroutines to finish.
func (s *Service) spawn(f func()) {
if s.subscriptionSpawner != nil {
s.subscriptionSpawner(f)
} else {
go f()
}
}

// Register PubSub subscribers
func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.subscribe(
p2p.BlockSubnetTopicFormat,
s.validateBeaconBlockPubSub,
s.beaconBlockSubscriber,
digest,
)
s.subscribe(
p2p.AggregateAndProofSubnetTopicFormat,
s.validateAggregateAndProof,
s.beaconAggregateProofSubscriber,
digest,
)
s.subscribe(
p2p.ExitSubnetTopicFormat,
s.validateVoluntaryExit,
s.voluntaryExitSubscriber,
digest,
)
s.subscribe(
p2p.ProposerSlashingSubnetTopicFormat,
s.validateProposerSlashing,
s.proposerSlashingSubscriber,
digest,
)
s.subscribe(
p2p.AttesterSlashingSubnetTopicFormat,
s.validateAttesterSlashing,
s.attesterSlashingSubscriber,
digest,
)
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.AttestationSubnetTopicFormat,
validate: s.validateCommitteeIndexBeaconAttestation,
handle: s.committeeIndexBeaconAttestationSubscriber,
digest: digest,
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
getSubnetsRequiringPeers: attesterSubnetIndices,
s.spawn(func() {
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, digest)
})
s.spawn(func() {
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, digest)
})
s.spawn(func() {
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, digest)
})
s.spawn(func() {
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, digest)
})
s.spawn(func() {
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, digest)
})
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.AttestationSubnetTopicFormat,
validate: s.validateCommitteeIndexBeaconAttestation,
handle: s.committeeIndexBeaconAttestationSubscriber,
digest: digest,
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
getSubnetsRequiringPeers: attesterSubnetIndices,
})
})

// New gossip topic in Altair
if params.BeaconConfig().AltairForkEpoch <= epoch {
s.subscribe(
p2p.SyncContributionAndProofSubnetTopicFormat,
s.validateSyncContributionAndProof,
s.syncContributionAndProofSubscriber,
digest,
)

s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
validate: s.validateSyncCommitteeMessage,
handle: s.syncCommitteeMessageSubscriber,
digest: digest,
getSubnetsToJoin: s.activeSyncSubnetIndices,
s.spawn(func() {
s.subscribe(
p2p.SyncContributionAndProofSubnetTopicFormat,
s.validateSyncContributionAndProof,
s.syncContributionAndProofSubscriber,
digest,
)
})
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
validate: s.validateSyncCommitteeMessage,
handle: s.syncCommitteeMessageSubscriber,
digest: digest,
getSubnetsToJoin: s.activeSyncSubnetIndices,
})
})

if features.Get().EnableLightClient {
s.subscribe(
p2p.LightClientOptimisticUpdateTopicFormat,
s.validateLightClientOptimisticUpdate,
s.lightClientOptimisticUpdateSubscriber,
digest,
)
s.subscribe(
p2p.LightClientFinalityUpdateTopicFormat,
s.validateLightClientFinalityUpdate,
s.lightClientFinalityUpdateSubscriber,
digest,
)
s.spawn(func() {
s.subscribe(
p2p.LightClientOptimisticUpdateTopicFormat,
s.validateLightClientOptimisticUpdate,
s.lightClientOptimisticUpdateSubscriber,
digest,
)
})
s.spawn(func() {
s.subscribe(
p2p.LightClientFinalityUpdateTopicFormat,
s.validateLightClientFinalityUpdate,
s.lightClientFinalityUpdateSubscriber,
digest,
)
})
}
}

// New gossip topic in Capella
if params.BeaconConfig().CapellaForkEpoch <= epoch {
s.subscribe(
p2p.BlsToExecutionChangeSubnetTopicFormat,
s.validateBlsToExecutionChange,
s.blsToExecutionChangeSubscriber,
digest,
)
s.spawn(func() {
s.subscribe(
p2p.BlsToExecutionChangeSubnetTopicFormat,
s.validateBlsToExecutionChange,
s.blsToExecutionChangeSubscriber,
digest,
)
})
}

// New gossip topic in Deneb, removed in Electra
if params.BeaconConfig().DenebForkEpoch <= epoch && epoch < params.BeaconConfig().ElectraForkEpoch {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.BlobSubnetTopicFormat,
validate: s.validateBlob,
handle: s.blobSubscriber,
digest: digest,
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
},
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.BlobSubnetTopicFormat,
validate: s.validateBlob,
handle: s.blobSubscriber,
digest: digest,
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
},
})
})
}

// New gossip topic in Electra, removed in Fulu
if params.BeaconConfig().ElectraForkEpoch <= epoch && epoch < params.BeaconConfig().FuluForkEpoch {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.BlobSubnetTopicFormat,
validate: s.validateBlob,
handle: s.blobSubscriber,
digest: digest,
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
},
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.BlobSubnetTopicFormat,
validate: s.validateBlob,
handle: s.blobSubscriber,
digest: digest,
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
},
})
})
}

// New gossip topic in Fulu.
if params.BeaconConfig().FuluForkEpoch <= epoch {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.DataColumnSubnetTopicFormat,
validate: s.validateDataColumn,
handle: s.dataColumnSubscriber,
digest: digest,
getSubnetsToJoin: s.dataColumnSubnetIndices,
getSubnetsRequiringPeers: s.allDataColumnSubnets,
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.DataColumnSubnetTopicFormat,
validate: s.validateDataColumn,
handle: s.dataColumnSubscriber,
digest: digest,
getSubnetsToJoin: s.dataColumnSubnetIndices,
getSubnetsRequiringPeers: s.allDataColumnSubnets,
})
})
}
}
// subscribe to a given topic with a given validator and subscription handler.
// The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) *pubsub.Subscription {
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
<-s.initialSyncComplete
_, e, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition as it would mean digest does not exist.

@@ -248,7 +337,7 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
// Impossible condition as it would mean topic does not exist.
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
}
return s.subscribeWithBase(s.addDigestToTopic(topic, digest), validator, handle)
s.subscribeWithBase(s.addDigestToTopic(topic, digest), validator, handle)
}

func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {

@@ -413,61 +502,38 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
// pruneSubscriptions unsubscribes from topics we are currently subscribed to but that are
// not in the list of wanted subnets.
// This function mutates the `subscriptionBySubnet` map, which is used to keep track of the current subscriptions.
func (s *Service) pruneSubscriptions(
subscriptionBySubnet map[uint64]*pubsub.Subscription,
wantedSubnets map[uint64]bool,
topicFormat string,
digest [4]byte,
) {
for subnet, subscription := range subscriptionBySubnet {
if subscription == nil {
// Should not happen, but just in case.
delete(subscriptionBySubnet, subnet)
continue
}

if wantedSubnets[subnet] {
// Nothing to prune.
continue
}

// We are subscribed to a subnet that is no longer wanted.
subscription.Cancel()
fullTopic := fmt.Sprintf(topicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
s.unSubscribeFromTopic(fullTopic)
delete(subscriptionBySubnet, subnet)
func (s *Service) pruneSubscriptions(t *subnetTracker, wantedSubnets map[uint64]bool) {
for _, subnet := range t.unwanted(wantedSubnets) {
t.cancelSubscription(subnet)
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
}
}

// subscribeToSubnets subscribes to needed subnets and unsubscribe from unneeded ones.
// This functions mutates the `subscriptionBySubnet` map, which is used to keep track of the current subscriptions.
func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
func (s *Service) subscribeToSubnets(t *subnetTracker) error {
// Do not subscribe if not synced.
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
return nil
}

valid, err := isDigestValid(p.digest, s.cfg.clock)
valid, err := isDigestValid(t.digest, s.cfg.clock)
if err != nil {
return errors.Wrap(err, "is digest valid")
}

// Unsubscribe from all subnets if digest is not valid. It's likely to be the case after a hard fork.
if !valid {
wantedSubnets := map[uint64]bool{}
s.pruneSubscriptions(p.subscriptionBySubnet, wantedSubnets, p.topicFormat, p.digest)
s.pruneSubscriptions(t, nil)
return errInvalidDigest
}

subnetsToJoin := p.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneSubscriptions(p.subscriptionBySubnet, subnetsToJoin, p.topicFormat, p.digest)
for subnet := range subnetsToJoin {
subnetTopic := fmt.Sprintf(p.topicFormat, p.digest, subnet)

if _, exists := p.subscriptionBySubnet[subnet]; !exists {
subscription := s.subscribeWithBase(subnetTopic, p.validate, p.handle)
p.subscriptionBySubnet[subnet] = subscription
}
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneSubscriptions(t, subnetsToJoin)
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
}

return nil
@@ -475,110 +541,81 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
|
||||
|
||||
// subscribeWithParameters subscribes to a list of subnets.
|
||||
func (s *Service) subscribeWithParameters(p subscribeParameters) {
|
||||
shortTopicFormat := p.topicFormat
|
||||
shortTopicFormatLen := len(shortTopicFormat)
|
||||
if shortTopicFormatLen >= 3 && shortTopicFormat[shortTopicFormatLen-3:] == "_%d" {
|
||||
shortTopicFormat = shortTopicFormat[:shortTopicFormatLen-3]
|
||||
}
|
||||
    shortTopic := fmt.Sprintf(shortTopicFormat, p.digest)
    tracker := newSubnetTracker(p)
    // Try once immediately so we don't have to wait until the next slot.
    s.ensureSubnetPeersAndSubscribe(tracker)

    parameters := subscribeToSubnetsParameters{
        subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
        topicFormat: p.topicFormat,
        digest: p.digest,
        validate: p.validate,
        handle: p.handle,
        getSubnetsToJoin: p.getSubnetsToJoin,
    go s.logMinimumPeersPerSubnet(p)

    slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
    defer slotTicker.Done()
    for {
        select {
        case <-slotTicker.C():
            s.ensureSubnetPeersAndSubscribe(tracker)
        case <-s.ctx.Done():
            return
        }
    }
    err := s.subscribeToSubnets(parameters)
    if err != nil {
        log.WithError(err).Error("Could not subscribe to subnets")
    }

func (s *Service) ensureSubnetPeersAndSubscribe(tracker *subnetTracker) {
    timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    minPeers := flags.Get().MinimumPeersPerSubnet
    logFields := tracker.logFields()
    neededSubnets := computeAllNeededSubnets(s.cfg.clock.CurrentSlot(), tracker.getSubnetsToJoin, tracker.getSubnetsRequiringPeers)

    if err := s.subscribeToSubnets(tracker); err != nil {
        if errors.Is(err, errInvalidDigest) {
            log.WithFields(logFields).Debug("Digest is invalid, stopping subscription")
            return
        }
        log.WithFields(logFields).WithError(err).Error("Could not subscribe to subnets")
        return
    }

    slotDuration := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    ctx, cancel := context.WithTimeout(s.ctx, timeout)
    defer cancel()
    if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, tracker.topicFormat, tracker.digest, minPeers, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
        log.WithFields(logFields).WithError(err).Debug("Could not find peers with subnets")
    }
}

func (s *Service) logMinimumPeersPerSubnet(p subscribeParameters) {
    logFields := p.logFields()
    minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
    // Subscribe to expected subnets and search for peers if needed at every slot.
    go func() {
        currentSlot := s.cfg.clock.CurrentSlot()
        neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
        func() {
            ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
            defer cancel()

            if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
                log.WithError(err).Debug("Could not find peers with subnets")
            }
        }()

        slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
        defer slotTicker.Done()

        for {
            select {
            case <-slotTicker.C():
                currentSlot := s.cfg.clock.CurrentSlot()
                neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)

                if err := s.subscribeToSubnets(parameters); err != nil {
                    if errors.Is(err, errInvalidDigest) {
                        log.WithField("topics", shortTopic).Debug("Digest is invalid, stopping subscription")
                        return
                    }
                    log.WithError(err).Error("Could not subscribe to subnets")
                    continue
                }

                func() {
                    ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
                    defer cancel()

                    if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
                        log.WithError(err).Debug("Could not find peers with subnets")
                    }
                }()

            case <-s.ctx.Done():
                return
            }
        }
    }()

    // Warn the user if we are not subscribed to enough peers in the subnets.
    go func() {
        log := log.WithField("minimum", minimumPeersPerSubnet)
        logTicker := time.NewTicker(5 * time.Minute)
        defer logTicker.Stop()

        for {
            select {
            case <-logTicker.C:
                currentSlot := s.cfg.clock.CurrentSlot()
                subnetsToFindPeersIndex := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)

                isSubnetWithMissingPeers := false
                // Find new peers for wanted subnets if needed.
                for index := range subnetsToFindPeersIndex {
                    topic := fmt.Sprintf(p.topicFormat, p.digest, index)

                    // Check if we have enough peers in the subnet. Skip if we do.
                    if count := s.connectedPeersCount(topic); count < minimumPeersPerSubnet {
                        isSubnetWithMissingPeers = true
                        log.WithFields(logrus.Fields{
                            "topic": topic,
                            "actual": count,
                        }).Warning("Not enough connected peers")
                    }
                }

                if !isSubnetWithMissingPeers {
                    log.WithField("topic", shortTopic).Info("All subnets have enough connected peers")
                }
                if !isSubnetWithMissingPeers {
                    log.WithFields(logFields).Debug("All subnets have enough connected peers")
                }
            case <-s.ctx.Done():
                return
            }
        }
    }()
}

func (s *Service) unSubscribeFromTopic(topic string) {
@@ -58,6 +58,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
        subHandler: newSubTopicHandler(),
        chainStarted: abool.New(),
    }
    markInitSyncComplete(t, &r)
    var err error
    p2pService.Digest, err = r.currentForkDigest()
    require.NoError(t, err)
@@ -83,6 +84,11 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
    }
}

func markInitSyncComplete(_ *testing.T, s *Service) {
    s.initialSyncComplete = make(chan struct{})
    close(s.initialSyncComplete)
}
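For readers unfamiliar with the trick the markInitSyncComplete helper relies on: receiving from a closed channel never blocks, so closing initialSyncComplete makes every code path that waits on it proceed immediately. A minimal, runnable illustration of just that idiom (not taken from the test code above):

package main

func main() {
    done := make(chan struct{})
    close(done)
    <-done // returns immediately: a receive from a closed channel never blocks
}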
func TestSubscribe_UnsubscribeTopic(t *testing.T) {
    p2pService := p2ptest.NewTestP2P(t)
    gt := time.Now()
@@ -101,6 +107,7 @@ func TestSubscribe_UnsubscribeTopic(t *testing.T) {
        chainStarted: abool.New(),
        subHandler: newSubTopicHandler(),
    }
    markInitSyncComplete(t, &r)
    var err error
    p2pService.Digest, err = r.currentForkDigest()
    require.NoError(t, err)
@@ -152,6 +159,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
        chainStarted: abool.New(),
        subHandler: newSubTopicHandler(),
    }
    markInitSyncComplete(t, &r)
    topic := "/eth2/%x/attester_slashing"
    var wg sync.WaitGroup
    wg.Add(1)
@@ -205,6 +213,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
        chainStarted: abool.New(),
        subHandler: newSubTopicHandler(),
    }
    markInitSyncComplete(t, &r)
    topic := "/eth2/%x/proposer_slashing"
    var wg sync.WaitGroup
    wg.Add(1)
@@ -253,6 +262,8 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
        subHandler: newSubTopicHandler(),
        chainStarted: abool.New(),
    }
    markInitSyncComplete(t, &r)

    var err error
    p.Digest, err = r.currentForkDigest()
    require.NoError(t, err)
@@ -292,25 +303,33 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
    }
    digest, err := r.currentForkDigest()
    require.NoError(t, err)
    subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)

    defaultTopic := "/eth2/testing/%#x/committee%d"
    params := subscribeParameters{
        topicFormat: "/eth2/testing/%#x/committee%d",
        digest: digest,
    }
    tracker := newSubnetTracker(params)

    // committee index 1
    fullTopic := fmt.Sprintf(defaultTopic, digest, 1) + r.cfg.p2p.Encoding().ProtocolSuffix()
    c1 := uint64(1)
    fullTopic := params.fullTopic(c1, r.cfg.p2p.Encoding().ProtocolSuffix())
    _, topVal := r.wrapAndReportValidation(fullTopic, r.noopValidator)
    require.NoError(t, r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal))
    subscriptions[1], err = r.cfg.p2p.SubscribeToTopic(fullTopic)
    sub1, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
    require.NoError(t, err)
    tracker.track(c1, sub1)

    // committee index 2
    fullTopic = fmt.Sprintf(defaultTopic, digest, 2) + r.cfg.p2p.Encoding().ProtocolSuffix()
    c2 := uint64(2)
    fullTopic = params.fullTopic(c2, r.cfg.p2p.Encoding().ProtocolSuffix())
    _, topVal = r.wrapAndReportValidation(fullTopic, r.noopValidator)
    err = r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal)
    require.NoError(t, err)
    subscriptions[2], err = r.cfg.p2p.SubscribeToTopic(fullTopic)
    sub2, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
    require.NoError(t, err)
    tracker.track(c2, sub2)

    r.pruneSubscriptions(subscriptions, map[uint64]bool{2: true}, defaultTopic, digest)
    r.pruneSubscriptions(tracker, map[uint64]bool{c2: true})
    require.LogsDoNotContain(t, hook, "Could not unregister topic validator")
}

@@ -539,7 +558,7 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {
    cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second)
    digest, err := r.currentForkDigest()
    assert.NoError(t, err)
    r.subscribeWithParameters(subscribeParameters{
    go r.subscribeWithParameters(subscribeParameters{
        topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
        digest: digest,
        getSubnetsToJoin: r.activeSyncSubnetIndices,
@@ -580,6 +599,7 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
        chainStarted: abool.New(),
        subHandler: newSubTopicHandler(),
    }
    markInitSyncComplete(t, &r)
    // Empty cache at the end of the test.
    defer cache.SyncSubnetIDs.EmptyAllCaches()
    cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), 0, []uint64{0, 1}, 10*time.Second)
@@ -589,12 +609,11 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
    require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), version)
    require.Equal(t, params.BeaconConfig().DenebForkEpoch, e)

    sp := subscribeToSubnetsParameters{
        subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
        topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
        digest: digest,
        getSubnetsToJoin: r.activeSyncSubnetIndices,
    }
    sp := newSubnetTracker(subscribeParameters{
        topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
        digest: digest,
        getSubnetsToJoin: r.activeSyncSubnetIndices,
    })
    require.NoError(t, r.subscribeToSubnets(sp))
    assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
    topicMap := map[string]bool{}
@@ -697,6 +716,7 @@ func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) {
        lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
        subHandler: newSubTopicHandler(),
    }
    markInitSyncComplete(t, &r)
    topic := p2p.LightClientOptimisticUpdateTopicFormat
    var wg sync.WaitGroup
    wg.Add(1)
@@ -764,6 +784,7 @@ func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) {
        lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
        subHandler: newSubTopicHandler(),
    }
    markInitSyncComplete(t, &r)
    topic := p2p.LightClientFinalityUpdateTopicFormat
    var wg sync.WaitGroup
    wg.Add(1)

@@ -427,6 +427,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
    cw := startup.NewClockSynchronizer()
    opts := []Option{WithClockWaiter(cw)}
    svc := NewService(ctx, append(opts, tt.svcopts...)...)
    markInitSyncComplete(t, svc)
    svc, tt.args.topic = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
    go svc.Start()
    if tt.clock == nil {

@@ -409,6 +409,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
    svc := NewService(ctx, append(opts, tt.svcopts...)...)
    var clock *startup.Clock
    svc, tt.args.topic, clock = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
    markInitSyncComplete(t, svc)
    go svc.Start()
    require.NoError(t, cw.SetClock(clock))
    svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)

@@ -856,7 +856,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) {
    svc, clock = tt.setupSvc(svc, tt.args.msg)
    require.NoError(t, cw.SetClock(clock))
    svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)

    markInitSyncComplete(t, svc)
    go svc.Start()
    marshalledObj, err := tt.args.msg.MarshalSSZ()
    assert.NoError(t, err)
changelog/kasey_start-discovery-immediately.md (new normal file, 2 additions)
@@ -0,0 +1,2 @@
### Fixed
- Start topic-based peer discovery before initial sync completes so that we have coverage of needed columns when range syncing.
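For readers skimming the diff, the shape of this change is roughly the following: subnet peer discovery is kicked off in its own goroutine as soon as the service starts, while gossip message handling still waits on the initial-sync signal. The sketch below is illustrative only; the function and parameter names are invented here and are not taken from the Prysm code.

package main

import "context"

// startSubnets sketches the new startup order: peer discovery for needed
// subnets runs immediately, while message handling is still gated on sync.
func startSubnets(ctx context.Context, initialSyncComplete <-chan struct{}, discoverPeers, handleMessages func(context.Context)) {
    go discoverPeers(ctx) // discovery no longer blocks on sync completion

    select {
    case <-initialSyncComplete:
        handleMessages(ctx) // gossip processing still waits for initial sync
    case <-ctx.Done():
    }
}

func main() {
    done := make(chan struct{})
    close(done)
    startSubnets(context.Background(), done, func(context.Context) {}, func(context.Context) {})
}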
@@ -8,6 +8,7 @@ import (
    "slices"
    "sort"
    "strings"
    "sync"
    "time"

    log "github.com/sirupsen/logrus"
@@ -404,6 +405,8 @@ func (b *BeaconChainConfig) InitializeForkSchedule() {

func LogDigests(b *BeaconChainConfig) {
    schedule := b.networkSchedule
    schedule.mu.RLock()
    defer schedule.mu.RUnlock()
    for _, e := range schedule.entries {
        log.WithFields(e.LogFields()).Debug("Network schedule entry initialized")
    digests := make([]string, 0, len(schedule.byDigest))
@@ -415,6 +418,7 @@ func LogDigests(b *BeaconChainConfig) {
}

type NetworkSchedule struct {
    mu sync.RWMutex
    entries []NetworkScheduleEntry
    byEpoch map[primitives.Epoch]*NetworkScheduleEntry
    byVersion map[[fieldparams.VersionLength]byte]*NetworkScheduleEntry
@@ -482,6 +486,11 @@ func (ns *NetworkSchedule) ForEpoch(epoch primitives.Epoch) NetworkScheduleEntry
}

func (ns *NetworkSchedule) activatedAt(epoch primitives.Epoch) (*NetworkScheduleEntry, bool) {
    ns.mu.RLock()
    defer ns.mu.RUnlock()
    if ns.byEpoch == nil {
        return nil, false
    }
    entry, ok := ns.byEpoch[epoch]
    return entry, ok
}
@@ -503,6 +512,8 @@ func (ns *NetworkSchedule) merge(other *NetworkSchedule) *NetworkSchedule {
}

func (ns *NetworkSchedule) index(e NetworkScheduleEntry) {
    ns.mu.Lock()
    defer ns.mu.Unlock()
    if _, ok := ns.byDigest[e.ForkDigest]; !ok {
        ns.byDigest[e.ForkDigest] = &e
    }
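The hunks above retrofit a sync.RWMutex onto the schedule's lookup maps: readers take the read lock, writers take the write lock. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of the same read-mostly locking discipline; the type and field names are illustrative, not the actual Prysm ones.

package main

import "sync"

// schedule stands in for a read-mostly index: many goroutines look entries up,
// a few write during initialization.
type schedule struct {
    mu      sync.RWMutex
    byEpoch map[uint64]string
}

// lookup takes the read lock, so concurrent readers never race with index.
func (s *schedule) lookup(epoch uint64) (string, bool) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    v, ok := s.byEpoch[epoch]
    return v, ok
}

// index takes the write lock before mutating the map.
func (s *schedule) index(epoch uint64, entry string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.byEpoch == nil {
        s.byEpoch = make(map[uint64]string)
    }
    s.byEpoch[epoch] = entry
}

func main() {
    s := &schedule{}
    s.index(0, "genesis")
    _, _ = s.lookup(0)
}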
@@ -763,3 +774,23 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {

    return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}

// EpochsDuration returns the time duration of the given number of epochs.
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
    return SlotsDuration(SlotsForEpochs(count, b), b)
}

// SlotsForEpochs returns the number of slots in the given number of epochs.
func SlotsForEpochs(count primitives.Epoch, b *BeaconChainConfig) primitives.Slot {
    return primitives.Slot(count) * b.SlotsPerEpoch
}

// SlotsDuration returns the time duration of the given number of slots.
func SlotsDuration(count primitives.Slot, b *BeaconChainConfig) time.Duration {
    return time.Duration(count) * SecondsPerSlot(b)
}

// SecondsPerSlot returns the time duration of a single slot.
func SecondsPerSlot(b *BeaconChainConfig) time.Duration {
    return time.Duration(b.SecondsPerSlot) * time.Second
}
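As a quick sanity check on the arithmetic these helpers encode: under mainnet parameters (12 seconds per slot, 32 slots per epoch), two epochs are 64 slots, or 768 seconds. A hedged usage sketch, assuming the exported helpers shown above and the usual Prysm import path for the params package:

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/config/params"
)

func main() {
    cfg := params.BeaconConfig()               // mainnet defaults: 12s per slot, 32 slots per epoch
    fmt.Println(params.SlotsForEpochs(2, cfg)) // 64 slots
    fmt.Println(params.SecondsPerSlot(cfg))    // 12s
    fmt.Println(params.EpochsDuration(2, cfg)) // 2 * 32 * 12s = 12m48s
}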
@@ -57,8 +57,11 @@ func ForkFromConfig(cfg *BeaconChainConfig, epoch primitives.Epoch) *ethpb.Fork

// ForkDataFromDigest performs the inverse, where it tries to determine the fork version
// and epoch from a provided digest by looping through our current fork schedule.
func ForkDataFromDigest(digest [4]byte) ([fieldparams.VersionLength]byte, primitives.Epoch, error) {
    cfg := BeaconConfig()
    entry, ok := cfg.networkSchedule.byDigest[digest]
    ns := BeaconConfig().networkSchedule
    ns.mu.RLock()
    defer ns.mu.RUnlock()
    // Look up the digest in our map of digests to fork versions and epochs.
    entry, ok := ns.byDigest[digest]
    if !ok {
        return [fieldparams.VersionLength]byte{}, 0, errors.Errorf("no fork exists for a digest of %#x", digest)
    }
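A short usage sketch of the lookup above. The digest literal is a placeholder, not a real fork digest, and the call assumes the refactored helper lives in the params package alongside BeaconConfig.

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/config/params"
)

func main() {
    digest := [4]byte{0x01, 0x02, 0x03, 0x04} // placeholder value for illustration
    version, epoch, err := params.ForkDataFromDigest(digest)
    if err != nil {
        fmt.Println("digest does not match any known fork:", err)
        return
    }
    fmt.Printf("digest %#x -> fork version %#x, epoch %d\n", digest, version, epoch)
}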