Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 15:37:56 -05:00)
listenForNewNodes and FindPeersWithSubnet: Stop using ReadNodes and use iterator instead. (#14669)
* `listenForNewNodes` and `FindPeersWithSubnet`: Stop using `ReadNodes` and use an iterator instead. This avoids an infinite loop in small devnets. * Update beacon-chain/p2p/discovery.go Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com> --------- Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
@@ -98,6 +98,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- corrects nil check on some interface attestation types
- temporary solution to handling electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.
- P2P: Avoid infinite loop when looking for peers in small networks.

### Security
@@ -17,7 +17,6 @@ go_library(
|
||||
"handshake.go",
|
||||
"info.go",
|
||||
"interfaces.go",
|
||||
"iterator.go",
|
||||
"log.go",
|
||||
"message_id.go",
|
||||
"monitoring.go",
|
||||
@@ -164,12 +163,10 @@ go_test(
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/testing:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
|
||||
@@ -225,11 +225,11 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer bootListener.Close()
|
||||
|
||||
// Use shorter period for testing.
|
||||
currentPeriod := pollingPeriod
|
||||
pollingPeriod = 1 * time.Second
|
||||
// Use smaller batch size for testing.
|
||||
currentBatchSize := batchSize
|
||||
batchSize = 2
|
||||
defer func() {
|
||||
pollingPeriod = currentPeriod
|
||||
batchSize = currentBatchSize
|
||||
}()
|
||||
|
||||
bootNode := bootListener.Self()
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type ListenerRebooter interface {
|
||||
@@ -47,10 +48,12 @@ const (
|
||||
udp6
|
||||
)
|
||||
|
||||
const quickProtocolEnrKey = "quic"
|
||||
|
||||
type quicProtocol uint16
|
||||
|
||||
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
|
||||
func (quicProtocol) ENRKey() string { return "quic" }
|
||||
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
|
||||
|
||||
type listenerWrapper struct {
|
||||
mu sync.RWMutex
|
||||
@@ -133,68 +136,129 @@ func (l *listenerWrapper) RebootListener() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshENR uses an epoch to refresh the enr entry for our node
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
// return early if discv5 isn't running
|
||||
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
|
||||
// This routine verifies and updates our attestation and sync committee subnets if they have been rotated.
|
||||
func (s *Service) RefreshPersistentSubnets() {
|
||||
// Return early if discv5 service isn't running.
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
}
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
if err := initializePersistentSubnets(s.dv5Listener.LocalNode().ID(), currEpoch); err != nil {
|
||||
|
||||
// Get the current epoch.
|
||||
currentSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Get our node ID.
|
||||
nodeID := s.dv5Listener.LocalNode().ID()
|
||||
|
||||
// Get our node record.
|
||||
record := s.dv5Listener.Self().Record()
|
||||
|
||||
// Get the version of our metadata.
|
||||
metadataVersion := s.Metadata().Version()
|
||||
|
||||
// Initialize persistent subnets.
|
||||
if err := initializePersistentSubnets(nodeID, currentEpoch); err != nil {
|
||||
log.WithError(err).Error("Could not initialize persistent subnets")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the current attestation subnet bitfield.
|
||||
bitV := bitfield.NewBitvector64()
|
||||
committees := cache.SubnetIDs.GetAllSubnets()
|
||||
for _, idx := range committees {
|
||||
attestationCommittees := cache.SubnetIDs.GetAllSubnets()
|
||||
for _, idx := range attestationCommittees {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
|
||||
|
||||
// Get the attestation subnet bitfield we store in our record.
|
||||
inRecordBitV, err := attBitvector(record)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve att bitfield")
|
||||
return
|
||||
}
|
||||
|
||||
// Compare current epoch with our fork epochs
|
||||
// Get the attestation subnet bitfield in our metadata.
|
||||
inMetadataBitV := s.Metadata().AttnetsBitfield()
|
||||
|
||||
// Is our attestation bitvector record up to date?
|
||||
isBitVUpToDate := bytes.Equal(bitV, inRecordBitV) && bytes.Equal(bitV, inMetadataBitV)
|
||||
|
||||
// Compare current epoch with Altair fork epoch
|
||||
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
|
||||
switch {
|
||||
case currEpoch < altairForkEpoch:
|
||||
|
||||
if currentEpoch < altairForkEpoch {
|
||||
// Phase 0 behaviour.
|
||||
if bytes.Equal(bitV, currentBitV) {
|
||||
// return early if bitfield hasn't changed
|
||||
if isBitVUpToDate {
|
||||
// Return early if bitfield hasn't changed.
|
||||
return
|
||||
}
|
||||
|
||||
// Some data changed. Update the record and the metadata.
|
||||
s.updateSubnetRecordWithMetadata(bitV)
|
||||
default:
|
||||
// Retrieve sync subnets from application level
|
||||
// cache.
|
||||
bitS := bitfield.Bitvector4{byte(0x00)}
|
||||
committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
|
||||
for _, idx := range committees {
|
||||
bitS.SetBitAt(idx, true)
|
||||
}
|
||||
currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve sync bitfield")
|
||||
return
|
||||
}
|
||||
if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
|
||||
s.Metadata().Version() == version.Altair {
|
||||
// return early if bitfields haven't changed
|
||||
return
|
||||
}
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
|
||||
|
||||
// Ping all peers.
|
||||
s.pingPeersAndLogEnr()
|
||||
|
||||
return
|
||||
}
|
||||
// ping all peers to inform them of new metadata
|
||||
|
||||
// Get the current sync subnet bitfield.
|
||||
bitS := bitfield.Bitvector4{byte(0x00)}
|
||||
syncCommittees := cache.SyncSubnetIDs.GetAllSubnets(currentEpoch)
|
||||
for _, idx := range syncCommittees {
|
||||
bitS.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Get the sync subnet bitfield we store in our record.
|
||||
inRecordBitS, err := syncBitvector(record)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve sync bitfield")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the sync subnet bitfield in our metadata.
|
||||
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
|
||||
|
||||
// Is our sync bitvector record up to date?
|
||||
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
|
||||
|
||||
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
|
||||
// Nothing to do, return early.
|
||||
return
|
||||
}
|
||||
|
||||
// Some data have changed, update our record and metadata.
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
|
||||
|
||||
// Ping all peers to inform them of new metadata
|
||||
s.pingPeersAndLogEnr()
|
||||
}
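The core of the refresh above is rebuilding the attestation bitvector from the tracked subnet IDs and comparing it against both the ENR record and the metadata. A minimal standalone sketch of that comparison (a hypothetical helper, not part of this PR; in the real service the subnet IDs come from `cache.SubnetIDs`):

```go
package p2p

import (
	"bytes"

	"github.com/prysmaticlabs/go-bitfield"
)

// attnetsUpToDate reports whether both the ENR record bitfield and the metadata
// bitfield already advertise exactly the subnets we currently track. If either
// differs, RefreshPersistentSubnets needs to update the record and metadata.
func attnetsUpToDate(trackedSubnets []uint64, inRecord, inMetadata bitfield.Bitvector64) bool {
	wanted := bitfield.NewBitvector64()
	for _, idx := range trackedSubnets {
		wanted.SetBitAt(idx, true)
	}
	return bytes.Equal(wanted, inRecord) && bytes.Equal(wanted, inMetadata)
}
```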
|
||||
|
||||
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
|
||||
func (s *Service) listenForNewNodes() {
|
||||
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
const (
|
||||
minLogInterval = 1 * time.Minute
|
||||
thresholdLimit = 5
|
||||
)
|
||||
|
||||
peersSummary := func(threshold uint) (uint, uint) {
|
||||
// Retrieve how many active peers we have.
|
||||
activePeers := s.Peers().Active()
|
||||
activePeerCount := uint(len(activePeers))
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
if activePeerCount >= threshold {
|
||||
return activePeerCount, 0
|
||||
}
|
||||
|
||||
missingPeerCount := threshold - activePeerCount
|
||||
|
||||
return activePeerCount, missingPeerCount
|
||||
}
|
||||
|
||||
var lastLogTime time.Time
|
||||
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
defer iterator.Close()
|
||||
connectivityTicker := time.NewTicker(1 * time.Minute)
|
||||
thresholdCount := 0
|
||||
@@ -203,25 +267,31 @@ func (s *Service) listenForNewNodes() {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
|
||||
case <-connectivityTicker.C:
|
||||
// Skip the connectivity check if not enabled.
|
||||
if !features.Get().EnableDiscoveryReboot {
|
||||
continue
|
||||
}
|
||||
|
||||
if !s.isBelowOutboundPeerThreshold() {
|
||||
// Reset counter if we are beyond the threshold
|
||||
thresholdCount = 0
|
||||
continue
|
||||
}
|
||||
|
||||
thresholdCount++
|
||||
|
||||
// Reboot listener if connectivity drops
|
||||
if thresholdCount > 5 {
|
||||
log.WithField("outboundConnectionCount", len(s.peers.OutboundConnected())).Warn("Rebooting discovery listener, reached threshold.")
|
||||
if thresholdCount > thresholdLimit {
|
||||
outBoundConnectedCount := len(s.peers.OutboundConnected())
|
||||
log.WithField("outboundConnectionCount", outBoundConnectedCount).Warn("Rebooting discovery listener, reached threshold.")
|
||||
if err := s.dv5Listener.RebootListener(); err != nil {
|
||||
log.WithError(err).Error("Could not reboot listener")
|
||||
continue
|
||||
}
|
||||
iterator = filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
|
||||
iterator = s.dv5Listener.RandomNodes()
|
||||
thresholdCount = 0
|
||||
}
|
||||
default:
|
||||
@@ -232,17 +302,35 @@ func (s *Service) listenForNewNodes() {
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
wantedCount := s.wantedPeerDials()
|
||||
if wantedCount == 0 {
|
||||
|
||||
// Compute the number of new peers we want to dial.
|
||||
activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)
|
||||
|
||||
fields := logrus.Fields{
|
||||
"currentPeerCount": activePeerCount,
|
||||
"targetPeerCount": s.cfg.MaxPeers,
|
||||
}
|
||||
|
||||
if missingPeerCount == 0 {
|
||||
log.Trace("Not looking for peers, at peer limit")
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
|
||||
if time.Since(lastLogTime) > minLogInterval {
|
||||
lastLogTime = time.Now()
|
||||
log.WithFields(fields).Debug("Searching for new active peers")
|
||||
}
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
|
||||
maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
|
||||
missingPeerCount = min(missingPeerCount, maxConcurrentDials)
|
||||
}
|
||||
wantedNodes := enode.ReadNodes(iterator, wantedCount)
|
||||
|
||||
// Search for new peers.
|
||||
wantedNodes := searchForPeers(iterator, batchSize, missingPeerCount, s.filterPeer)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 0; i < len(wantedNodes); i++ {
|
||||
node := wantedNodes[i]
|
||||
@@ -458,6 +546,8 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
|
||||
// Ignore nodes that are already active.
|
||||
if s.peers.IsActive(peerData.ID) {
|
||||
// Constantly update enr for known peers
|
||||
s.peers.UpdateENR(node.Record(), peerData.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -526,17 +616,6 @@ func (s *Service) isBelowOutboundPeerThreshold() bool {
|
||||
return outBoundCount < outBoundThreshold
|
||||
}
|
||||
|
||||
func (s *Service) wantedPeerDials() int {
|
||||
maxPeers := int(s.cfg.MaxPeers)
|
||||
|
||||
activePeers := len(s.Peers().Active())
|
||||
wantedCount := 0
|
||||
if maxPeers > activePeers {
|
||||
wantedCount = maxPeers - activePeers
|
||||
}
|
||||
return wantedCount
|
||||
}
|
||||
|
||||
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
|
||||
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
|
||||
var allAddrs []ma.Multiaddr
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -30,13 +32,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -131,6 +132,10 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCreateLocalNode(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.Eip7594ForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
testCases := []struct {
|
||||
name string
|
||||
cfg *Config
|
||||
@@ -499,192 +504,270 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outb
|
||||
return id
|
||||
}
|
||||
|
||||
func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
func createAndConnectPeer(t *testing.T, p2pService *testp2p.TestP2P, offset int) {
|
||||
// Create the private key.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(offset + i)
|
||||
}
|
||||
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peer.
|
||||
peer := testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
|
||||
// Add the peer and connect it.
|
||||
p2pService.Peers().Add(&enr.Record{}, peer.PeerID(), nil, network.DirOutbound)
|
||||
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
|
||||
p2pService.Connect(peer)
|
||||
}
|
||||
|
||||
// Define the ping count.
|
||||
var actualPingCount int
|
||||
|
||||
type check struct {
|
||||
pingCount int
|
||||
metadataSequenceNumber uint64
|
||||
attestationSubnets []uint64
|
||||
syncSubnets []uint64
|
||||
custodySubnetCount *uint64
|
||||
}
|
||||
|
||||
func checkPingCountCacheMetadataRecord(
|
||||
t *testing.T,
|
||||
service *Service,
|
||||
expected check,
|
||||
) {
|
||||
// Check the ping count.
|
||||
require.Equal(t, expected.pingCount, actualPingCount)
|
||||
|
||||
// Check the attestation subnets in the cache.
|
||||
actualAttestationSubnets := cache.SubnetIDs.GetAllSubnets()
|
||||
require.DeepSSZEqual(t, expected.attestationSubnets, actualAttestationSubnets)
|
||||
|
||||
// Check the metadata sequence number.
|
||||
actualMetadataSequenceNumber := service.metaData.SequenceNumber()
|
||||
require.Equal(t, expected.metadataSequenceNumber, actualMetadataSequenceNumber)
|
||||
|
||||
// Compute expected attestation subnets bits.
|
||||
expectedBitV := bitfield.NewBitvector64()
|
||||
exists := false
|
||||
|
||||
for _, idx := range expected.attestationSubnets {
|
||||
exists = true
|
||||
expectedBitV.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Check attnets in ENR.
|
||||
var actualBitVENR bitfield.Bitvector64
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(attSubnetEnrKey, &actualBitVENR))
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBitV, actualBitVENR)
|
||||
|
||||
// Check attnets in metadata.
|
||||
if !exists {
|
||||
expectedBitV = nil
|
||||
}
|
||||
|
||||
actualBitVMetadata := service.metaData.AttnetsBitfield()
|
||||
require.DeepSSZEqual(t, expectedBitV, actualBitVMetadata)
|
||||
|
||||
if expected.syncSubnets != nil {
|
||||
// Compute expected sync subnets bits.
|
||||
expectedBitS := bitfield.NewBitvector4()
|
||||
exists = false
|
||||
|
||||
for _, idx := range expected.syncSubnets {
|
||||
exists = true
|
||||
expectedBitS.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Check syncnets in ENR.
|
||||
var actualBitSENR bitfield.Bitvector4
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, &actualBitSENR))
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBitS, actualBitSENR)
|
||||
|
||||
// Check syncnets in metadata.
|
||||
if !exists {
|
||||
expectedBitS = nil
|
||||
}
|
||||
|
||||
actualBitSMetadata := service.metaData.SyncnetsBitfield()
|
||||
require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
// Clean up caches after usage.
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
svcBuilder func(t *testing.T) *Service
|
||||
postValidation func(t *testing.T, s *Service)
|
||||
const (
|
||||
altairForkEpoch = 5
|
||||
eip7594ForkEpoch = 10
|
||||
)
|
||||
|
||||
// Set up epochs.
|
||||
defaultCfg := params.BeaconConfig()
|
||||
cfg := defaultCfg.Copy()
|
||||
cfg.AltairForkEpoch = altairForkEpoch
|
||||
cfg.Eip7594ForkEpoch = eip7594ForkEpoch
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the number of seconds per epoch.
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
secondsPerEpoch := secondsPerSlot * uint64(slotsPerEpoch)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
epochSinceGenesis uint64
|
||||
checks []check
|
||||
}{
|
||||
{
|
||||
name: "metadata no change",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, idx := range subs {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
|
||||
name: "Phase0",
|
||||
epochSinceGenesis: 0,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch with no bitfield",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, idx := range subs {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated past fork epoch with bitfields",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-6 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
name: "Altair",
|
||||
epochSinceGenesis: altairForkEpoch,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcBuilder(t)
|
||||
s.RefreshENR()
|
||||
tt.postValidation(t, s)
|
||||
s.dv5Listener.Close()
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const peerOffset = 1
|
||||
|
||||
// Initialize the ping count.
|
||||
actualPingCount = 0
|
||||
|
||||
// Create the private key.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(i)
|
||||
}
|
||||
|
||||
unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a p2p service.
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create and connect a peer.
|
||||
createAndConnectPeer(t, p2p, peerOffset)
|
||||
|
||||
// Create a service.
|
||||
service := &Service{
|
||||
pingMethod: func(_ context.Context, _ peer.ID) error {
|
||||
actualPingCount++
|
||||
return nil
|
||||
},
|
||||
cfg: &Config{UDPPort: 2000},
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
}
|
||||
|
||||
// Set the listener and the metadata.
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return service.createListener(nil, privateKey)
|
||||
}
|
||||
|
||||
listener, err := newListener(createListener)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.dv5Listener = listener
|
||||
service.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[0])
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[1])
|
||||
|
||||
// Add a sync committee subnet.
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'a'}, altairForkEpoch, []uint64{1, 2}, 1*time.Hour)
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[2])
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[3])
|
||||
|
||||
// Clean the test.
|
||||
service.dv5Listener.Close()
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
})
|
||||
}
|
||||
|
||||
// Reset the config.
|
||||
params.OverrideBeaconConfig(defaultCfg)
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ type PeerManager interface {
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshENR()
|
||||
RefreshPersistentSubnets()
|
||||
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
}
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
// filterNodes wraps an iterator such that Next only returns nodes for which
|
||||
// the 'check' function returns true. This custom implementation also
|
||||
// checks for context deadlines so that in the event the parent context has
|
||||
// expired, we do exit from the search rather than perform more network
|
||||
// lookups for additional peers.
|
||||
func filterNodes(ctx context.Context, it enode.Iterator, check func(*enode.Node) bool) enode.Iterator {
|
||||
return &filterIter{ctx, it, check}
|
||||
}
|
||||
|
||||
type filterIter struct {
|
||||
context.Context
|
||||
enode.Iterator
|
||||
check func(*enode.Node) bool
|
||||
}
|
||||
|
||||
// Next looks up for the next valid node according to our
|
||||
// filter criteria.
|
||||
func (f *filterIter) Next() bool {
|
||||
for f.Iterator.Next() {
|
||||
if f.Context.Err() != nil {
|
||||
return false
|
||||
}
|
||||
if f.check(f.Node()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -118,6 +118,15 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Status) UpdateENR(record *enr.Record, pid peer.ID) {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
if peerData, ok := p.store.PeerData(pid); ok {
|
||||
peerData.Enr = record
|
||||
}
|
||||
}
|
||||
|
||||
// Scorers exposes peer scoring management service.
|
||||
func (p *Status) Scorers() *scorers.Service {
|
||||
return p.scorers
|
||||
|
||||
@@ -10,15 +10,25 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var _ pubsub.SubscriptionFilter = (*Service)(nil)
|
||||
|
||||
// It is set at this limit to handle the possibility
|
||||
// of double topic subscriptions at fork boundaries.
|
||||
// -> 64 Attestation Subnets * 2.
|
||||
// -> 4 Sync Committee Subnets * 2.
|
||||
// -> Block,Aggregate,ProposerSlashing,AttesterSlashing,Exits,SyncContribution * 2.
|
||||
// -> BeaconBlock * 2 = 2
|
||||
// -> BeaconAggregateAndProof * 2 = 2
|
||||
// -> VoluntaryExit * 2 = 2
|
||||
// -> ProposerSlashing * 2 = 2
|
||||
// -> AttesterSlashing * 2 = 2
|
||||
// -> 64 Beacon Attestation * 2 = 128
|
||||
// -> SyncContributionAndProof * 2 = 2
|
||||
// -> 4 SyncCommitteeSubnets * 2 = 8
|
||||
// -> BlsToExecutionChange * 2 = 2
|
||||
// -> 6 BlobSidecar * 2 = 12
|
||||
// -------------------------------------
|
||||
// TOTAL = 162
|
||||
const pubsubSubscriptionRequestLimit = 200
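For reference, the total in the comment above can be checked mechanically. The constant below is only a restatement of the arithmetic from that comment (a hypothetical addition, not code from this PR):

```go
package p2p

// expectedTopicSubscriptions restates the per-topic counts listed above; each
// count is doubled to allow duplicate subscriptions at a fork boundary.
const expectedTopicSubscriptions = 2*1 + // BeaconBlock
	2*1 + // BeaconAggregateAndProof
	2*1 + // VoluntaryExit
	2*1 + // ProposerSlashing
	2*1 + // AttesterSlashing
	2*64 + // beacon attestation subnets
	2*1 + // SyncContributionAndProof
	2*4 + // sync committee subnets
	2*1 + // BlsToExecutionChange
	2*6 // blob sidecar subnets: total 162, under the limit of 200
```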
|
||||
|
||||
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
|
||||
@@ -95,8 +105,15 @@ func (s *Service) CanSubscribe(topic string) bool {
|
||||
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
|
||||
// This method returns only the topics of interest and may return an error if the subscription
|
||||
// request contains too many topics.
|
||||
func (s *Service) FilterIncomingSubscriptions(_ peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
|
||||
func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
|
||||
if len(subs) > pubsubSubscriptionRequestLimit {
|
||||
subsCount := len(subs)
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": peerID,
|
||||
"subscriptionCounts": subsCount,
|
||||
"subscriptionLimit": pubsubSubscriptionRequestLimit,
|
||||
}).Debug("Too many incoming subscriptions, filtering them")
|
||||
|
||||
return nil, pubsub.ErrTooManySubscriptions
|
||||
}
|
||||
|
||||
|
||||
@@ -43,6 +43,10 @@ var _ runtime.Service = (*Service)(nil)
|
||||
// defined below.
|
||||
var pollingPeriod = 6 * time.Second
|
||||
|
||||
// When looking for new nodes, if not enough nodes are found,
|
||||
// we stop after this number of iterations.
|
||||
var batchSize = 2_000
|
||||
|
||||
// Refresh rate of ENR set at twice per slot.
|
||||
var refreshRate = slots.DivideSlotBy(2)
|
||||
|
||||
@@ -227,7 +231,7 @@ func (s *Service) Start() {
|
||||
}
|
||||
// Initialize metadata according to the
|
||||
// current epoch.
|
||||
s.RefreshENR()
|
||||
s.RefreshPersistentSubnets()
|
||||
|
||||
// Periodic functions.
|
||||
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
|
||||
@@ -235,7 +239,7 @@ func (s *Service) Start() {
|
||||
})
|
||||
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
|
||||
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
|
||||
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
|
||||
async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
|
||||
async.RunEvery(s.ctx, 1*time.Minute, func() {
|
||||
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
|
||||
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
|
||||
|
||||
@@ -2,6 +2,7 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -19,22 +20,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
mathutil "github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
|
||||
var syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
var (
|
||||
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
|
||||
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
var attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
|
||||
var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
|
||||
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
)
|
||||
|
||||
// The value used with the subnet, in order
|
||||
// to create an appropriate key to retrieve
|
||||
// the relevant lock. This is used to differentiate
|
||||
// sync subnets from attestation subnets. This is deliberately
|
||||
// chosen as more than 64(attestation subnet count).
|
||||
// sync subnets from others. This is deliberately
|
||||
// chosen as more than 64 (attestation subnet count).
|
||||
const syncLockerVal = 100
|
||||
|
||||
// The value used with the blob sidecar subnet, in order
|
||||
@@ -44,6 +47,77 @@ const syncLockerVal = 100
|
||||
// chosen more than sync and attestation subnet combined.
|
||||
const blobSubnetLockerVal = 110
|
||||
|
||||
// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
|
||||
func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) {
|
||||
switch {
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
return s.filterPeerForAttSubnet(index), nil
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
return s.filterPeerForSyncSubnet(index), nil
|
||||
default:
|
||||
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
|
||||
}
|
||||
}
|
||||
|
||||
// searchForPeers performs a network search for peers subscribed to a particular subnet.
|
||||
// It exits as soon as one of these conditions is met:
|
||||
// - It looped through `batchSize` nodes.
|
||||
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
|
||||
// - Iterator is exhausted.
|
||||
func searchForPeers(
|
||||
iterator enode.Iterator,
|
||||
batchSize int,
|
||||
peersToFindCount uint,
|
||||
filter func(node *enode.Node) bool,
|
||||
) []*enode.Node {
|
||||
nodeFromNodeID := make(map[enode.ID]*enode.Node, batchSize)
|
||||
for i := 0; i < batchSize && uint(len(nodeFromNodeID)) <= peersToFindCount && iterator.Next(); i++ {
|
||||
node := iterator.Node()
|
||||
|
||||
// Filter out nodes that do not meet the criteria.
|
||||
if !filter(node) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
prevNode, ok := nodeFromNodeID[node.ID()]
|
||||
if ok && prevNode.Seq() > node.Seq() {
|
||||
continue
|
||||
}
|
||||
|
||||
nodeFromNodeID[node.ID()] = node
|
||||
}
|
||||
|
||||
// Convert the map to a slice.
|
||||
nodes := make([]*enode.Node, 0, len(nodeFromNodeID))
|
||||
for _, node := range nodeFromNodeID {
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
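As a usage sketch (hypothetical, assuming it sits next to this code in the p2p package so the unexported `searchForPeers` and `batchSize` resolve; the target of 10 nodes is an example value):

```go
package p2p

import (
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// exampleSubnetSearch walks at most batchSize entries of the discv5 random-walk
// iterator and keeps up to 10 nodes passing the filter. The bound is what keeps
// the search from spinning forever on a devnet with only a handful of nodes.
func exampleSubnetSearch(listener *discover.UDPv5, filter func(*enode.Node) bool) []*enode.Node {
	iterator := listener.RandomNodes()
	defer iterator.Close()

	return searchForPeers(iterator, batchSize, 10, filter)
}
```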
|
||||
|
||||
// dialPeer dials a peer in a separate goroutine.
|
||||
func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.Node) {
|
||||
info, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
return
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := s.connectWithPeer(ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet performs a network search for peers
|
||||
// subscribed to a particular subnet. Then it tries to connect
|
||||
// with those peers. This method will block until either:
|
||||
@@ -52,67 +126,104 @@ const blobSubnetLockerVal = 110
|
||||
// On some edge cases, this method may hang indefinitely while peers
|
||||
// are actually found. In such a case, the user should cancel the context
|
||||
// and re-run the method again.
|
||||
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
index uint64, threshold int) (bool, error) {
|
||||
func (s *Service) FindPeersWithSubnet(
|
||||
ctx context.Context,
|
||||
topic string,
|
||||
index uint64,
|
||||
threshold int,
|
||||
) (bool, error) {
|
||||
const minLogInterval = 1 * time.Minute
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
span.SetAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
|
||||
if s.dv5Listener == nil {
|
||||
// return if discovery isn't set
|
||||
// Return if discovery isn't set
|
||||
return false, nil
|
||||
}
|
||||
|
||||
topic += s.Encoding().ProtocolSuffix()
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
defer iterator.Close()
|
||||
switch {
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
|
||||
default:
|
||||
return false, errors.New("no subnet exists for provided topic")
|
||||
|
||||
filter, err := s.nodeFilter(topic, index)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "node filter")
|
||||
}
|
||||
|
||||
peersSummary := func(topic string, threshold int) (int, int) {
|
||||
// Retrieve how many peers we have for this topic.
|
||||
peerCountForTopic := len(s.pubsub.ListPeers(topic))
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)
|
||||
|
||||
return peerCountForTopic, missingPeerCountForTopic
|
||||
}
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
|
||||
// Exit early if we have enough peers.
|
||||
if missingPeerCountForTopic == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"targetPeerCount": threshold,
|
||||
})
|
||||
|
||||
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")
|
||||
|
||||
lastLogTime := time.Now()
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for {
|
||||
currNum := len(s.pubsub.ListPeers(topic))
|
||||
if currNum >= threshold {
|
||||
// If the context is done, we can exit the loop. This is the unhappy path.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return false, errors.Errorf(
|
||||
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
|
||||
topic, peerCountForTopic, threshold,
|
||||
)
|
||||
}
|
||||
|
||||
// Search for new peers in the network.
|
||||
nodes := searchForPeers(iterator, batchSize, uint(missingPeerCountForTopic), filter)
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
|
||||
// Dial the peers in batches.
|
||||
for start := 0; start < len(nodes); start += maxConcurrentDials {
|
||||
stop := min(start+maxConcurrentDials, len(nodes))
|
||||
for _, node := range nodes[start:stop] {
|
||||
s.dialPeer(ctx, wg, node)
|
||||
}
|
||||
|
||||
// Wait for all dials to be completed.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
|
||||
// If we have enough peers, we can exit the loop. This is the happy path.
|
||||
if missingPeerCountForTopic == 0 {
|
||||
break
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
|
||||
"only %d out of %d peers were able to be found", topic, currNum, threshold)
|
||||
}
|
||||
nodeCount := int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch)
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
nodeCount = min(nodeCount, flags.Get().MaxConcurrentDials)
|
||||
}
|
||||
nodes := enode.ReadNodes(iterator, nodeCount)
|
||||
for _, node := range nodes {
|
||||
info, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := s.connectWithPeer(ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
if time.Since(lastLogTime) > minLogInterval {
|
||||
lastLogTime = time.Now()
|
||||
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
|
||||
}
|
||||
// Wait for all dials to be completed.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
|
||||
return true, nil
|
||||
}
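Because the doc comment above warns that the search may hang in edge cases, a caller can bound it with a context deadline. A hypothetical caller sketch (same-package assumption; the 30s deadline, topic and threshold of 6 are illustrative values, not values from this PR):

```go
package p2p

import (
	"context"
	"time"
)

// ensureSubnetPeers bounds FindPeersWithSubnet with a 30-second deadline: it
// returns (true, nil) once at least 6 peers are found for the topic, or an
// error once the deadline expires.
func ensureSubnetPeers(ctx context.Context, s *Service, topic string, subnet uint64) (bool, error) {
	searchCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	return s.FindPeersWithSubnet(searchCtx, topic, subnet, 6)
}
```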
|
||||
|
||||
@@ -156,11 +267,17 @@ func (s *Service) filterPeerForSyncSubnet(index uint64) func(node *enode.Node) b
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
func (s *Service) hasPeerWithSubnet(topic string) bool {
|
||||
func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
|
||||
// In the event peer threshold is lower, we will choose the lower
|
||||
// threshold.
|
||||
minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
|
||||
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
|
||||
minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
|
||||
topic := subnetTopic + s.Encoding().ProtocolSuffix()
|
||||
peersWithSubnet := s.pubsub.ListPeers(topic)
|
||||
peersWithSubnetCount := len(peersWithSubnet)
|
||||
|
||||
enoughPeers := peersWithSubnetCount >= minPeers
|
||||
|
||||
return enoughPeers
|
||||
}
|
||||
|
||||
// Updates the service's discv5 listener record's attestation subnet
|
||||
@@ -355,10 +472,10 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {
|
||||
|
||||
// The subnet locker is a map which keeps track of all
|
||||
// mutexes stored per subnet. This locker is re-used
|
||||
// between both the attestation and sync subnets. In
|
||||
// order to differentiate between attestation and sync
|
||||
// subnets. Sync subnets are stored by (subnet+syncLockerVal). This
|
||||
// is to prevent conflicts while allowing both subnets
|
||||
// between both the attestation, sync and blob subnets.
|
||||
// Sync subnets are stored by (subnet+syncLockerVal).
|
||||
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
|
||||
// This is to prevent conflicts while allowing subnets
|
||||
// to use a single locker.
|
||||
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
|
||||
s.subnetsLockLock.Lock()
|
||||
|
||||
@@ -27,6 +27,7 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//config:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/control:go_default_library",
|
||||
|
||||
@@ -65,8 +65,8 @@ func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (*FakeP2P) RefreshENR() {}
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*FakeP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
// LeaveTopic -- fake.
|
||||
func (*FakeP2P) LeaveTopic(_ string) error {
|
||||
|
||||
@@ -47,8 +47,8 @@ func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return m.DiscoveryAddr, nil
|
||||
}
|
||||
|
||||
// RefreshENR .
|
||||
func (*MockPeerManager) RefreshENR() {}
|
||||
// RefreshPersistentSubnets .
|
||||
func (*MockPeerManager) RefreshPersistentSubnets() {}
|
||||
|
||||
// FindPeersWithSubnet .
|
||||
func (*MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
|
||||
@@ -10,9 +10,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/config"
|
||||
core "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/control"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
@@ -34,13 +36,17 @@ import (
|
||||
|
||||
// We have to declare this again here to prevent a circular dependency
|
||||
// with the main p2p package.
|
||||
const metatadataV1Topic = "/eth2/beacon_chain/req/metadata/1"
|
||||
const metatadataV2Topic = "/eth2/beacon_chain/req/metadata/2"
|
||||
const (
|
||||
metadataV1Topic = "/eth2/beacon_chain/req/metadata/1"
|
||||
metadataV2Topic = "/eth2/beacon_chain/req/metadata/2"
|
||||
metadataV3Topic = "/eth2/beacon_chain/req/metadata/3"
|
||||
)
|
||||
|
||||
// TestP2P represents a p2p implementation that can be used for testing.
|
||||
type TestP2P struct {
|
||||
t *testing.T
|
||||
BHost host.Host
|
||||
EnodeID enode.ID
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
BroadcastCalled atomic.Bool
|
||||
@@ -51,9 +57,17 @@ type TestP2P struct {
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
func NewTestP2P(t *testing.T) *TestP2P {
|
||||
func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
|
||||
ctx := context.Background()
|
||||
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}), libp2p.Transport(tcp.NewTCPTransport), libp2p.DefaultListenAddrs)
|
||||
options := []config.Option{
|
||||
libp2p.ResourceManager(&network.NullResourceManager{}),
|
||||
libp2p.Transport(tcp.NewTCPTransport),
|
||||
libp2p.DefaultListenAddrs,
|
||||
}
|
||||
|
||||
options = append(options, userOptions...)
|
||||
|
||||
h, err := libp2p.New(options...)
|
||||
require.NoError(t, err)
|
||||
ps, err := pubsub.NewFloodSub(ctx, h,
|
||||
pubsub.WithMessageSigning(false),
|
||||
@@ -270,6 +284,11 @@ func (*TestP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
}
|
||||
|
||||
// NodeID returns the node id of the local peer.
|
||||
func (p *TestP2P) NodeID() enode.ID {
|
||||
return p.EnodeID
|
||||
}
|
||||
|
||||
// DiscoveryAddresses --
|
||||
func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
@@ -278,7 +297,7 @@ func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
// AddConnectionHandler handles the connection with a newly connected peer.
|
||||
func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID) error) {
|
||||
p.BHost.Network().Notify(&network.NotifyBundle{
|
||||
ConnectedF: func(net network.Network, conn network.Conn) {
|
||||
ConnectedF: func(_ network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction)
|
||||
@@ -302,7 +321,7 @@ func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID
|
||||
// AddDisconnectionHandler --
|
||||
func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error) {
|
||||
p.BHost.Network().Notify(&network.NotifyBundle{
|
||||
DisconnectedF: func(net network.Network, conn network.Conn) {
|
||||
DisconnectedF: func(_ network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnecting)
|
||||
@@ -317,6 +336,8 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID
|
||||
|
||||
// Send a message to a specific peer.
|
||||
func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid peer.ID) (network.Stream, error) {
|
||||
metadataTopics := map[string]bool{metadataV1Topic: true, metadataV2Topic: true, metadataV3Topic: true}
|
||||
|
||||
t := topic
|
||||
if t == "" {
|
||||
return nil, fmt.Errorf("protocol doesn't exist for proto message: %v", msg)
|
||||
@@ -326,7 +347,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if topic != metatadataV1Topic && topic != metatadataV2Topic {
|
||||
if !metadataTopics[topic] {
|
||||
castedMsg, ok := msg.(ssz.Marshaler)
|
||||
if !ok {
|
||||
p.t.Fatalf("%T doesn't support ssz marshaler", msg)
|
||||
@@ -367,8 +388,8 @@ func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (*TestP2P) RefreshENR() {}
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*TestP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
// ForkDigest mocks the p2p func.
|
||||
func (p *TestP2P) ForkDigest() ([4]byte, error) {
|
||||
|
||||
@@ -21,97 +21,148 @@ import (
func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
SetRPCStreamDeadlines(stream)

// Validate the incoming request regarding rate limiting.
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
return errors.Wrap(err, "validate request")
}

s.rateLimiter.add(stream, 1)

if s.cfg.p2p.Metadata() == nil || s.cfg.p2p.Metadata().IsNil() {
// Retrieve our metadata.
metadata := s.cfg.p2p.Metadata()

// Handle the case our metadata is nil.
if metadata == nil || metadata.IsNil() {
nilErr := errors.New("nil metadata stored for host")

resp, err := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
if err != nil {
log.WithError(err).Debug("Could not generate a response error")
} else if _, err := stream.Write(resp); err != nil {
return nilErr
}

if _, err := stream.Write(resp); err != nil {
log.WithError(err).Debug("Could not write to stream")
}

return nilErr
}

// Get the stream version from the protocol.
_, _, streamVersion, err := p2p.TopicDeconstructor(string(stream.Protocol()))
if err != nil {
wrappedErr := errors.Wrap(err, "topic deconstructor")

resp, genErr := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
if genErr != nil {
log.WithError(genErr).Debug("Could not generate a response error")
} else if _, wErr := stream.Write(resp); wErr != nil {
return wrappedErr
}

if _, wErr := stream.Write(resp); wErr != nil {
log.WithError(wErr).Debug("Could not write to stream")
}
return err
return wrappedErr
}
currMd := s.cfg.p2p.Metadata()

// Handle the case where the stream version is not recognized.
metadataVersion := metadata.Version()
switch streamVersion {
case p2p.SchemaVersionV1:
// We have a v1 metadata object saved locally, so we
// convert it back to a v0 metadata object.
if currMd.Version() != version.Phase0 {
currMd = wrapper.WrappedMetadataV0(
switch metadataVersion {
case version.Altair, version.Deneb:
metadata = wrapper.WrappedMetadataV0(
&pb.MetaDataV0{
Attnets: currMd.AttnetsBitfield(),
SeqNumber: currMd.SequenceNumber(),
Attnets: metadata.AttnetsBitfield(),
SeqNumber: metadata.SequenceNumber(),
})
}

case p2p.SchemaVersionV2:
// We have a v0 metadata object saved locally, so we
// convert it to a v1 metadata object.
if currMd.Version() != version.Altair {
currMd = wrapper.WrappedMetadataV1(
switch metadataVersion {
case version.Phase0:
metadata = wrapper.WrappedMetadataV1(
&pb.MetaDataV1{
Attnets: currMd.AttnetsBitfield(),
SeqNumber: currMd.SequenceNumber(),
Attnets: metadata.AttnetsBitfield(),
SeqNumber: metadata.SequenceNumber(),
Syncnets: bitfield.Bitvector4{byte(0x00)},
})
case version.Deneb:
metadata = wrapper.WrappedMetadataV1(
&pb.MetaDataV1{
Attnets: metadata.AttnetsBitfield(),
SeqNumber: metadata.SequenceNumber(),
Syncnets: metadata.SyncnetsBitfield(),
})
}
}

// Write the METADATA response into the stream.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
return errors.Wrap(err, "write metadata response")
}
_, err = s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, currMd)

// Encode the metadata and write it to the stream.
_, err = s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, metadata)
if err != nil {
return err
return errors.Wrap(err, "encode metadata")
}

closeStream(stream, log)
return nil
}
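The handler now chooses the response shape from the stream's schema version: a v1 stream gets a MetaDataV0 view of whatever is stored locally, while a v2 stream gets a MetaDataV1 view, zeroing the syncnets bitfield when the local object predates Altair. A rough, self-contained sketch of that up/down conversion, using simplified stand-in structs rather than prysm's wrapper and protobuf types:

package main

import "fmt"

// Simplified stand-ins for prysm's MetaDataV0/MetaDataV1 protobufs; the field
// layout here is illustrative only.
type metaDataV0 struct {
	SeqNumber uint64
	Attnets   uint64
}

type metaDataV1 struct {
	SeqNumber uint64
	Attnets   uint64
	Syncnets  uint8
}

// downgrade builds the v0 view of a v1 object: the syncnets bitfield is dropped.
func downgrade(md metaDataV1) metaDataV0 {
	return metaDataV0{SeqNumber: md.SeqNumber, Attnets: md.Attnets}
}

// upgrade builds the v1 view of a v0 object: the syncnets bitfield is zeroed,
// mirroring the bitfield.Bitvector4{0x00} default used by the handler.
func upgrade(md metaDataV0) metaDataV1 {
	return metaDataV1{SeqNumber: md.SeqNumber, Attnets: md.Attnets, Syncnets: 0x00}
}

func main() {
	local := metaDataV1{SeqNumber: 7, Attnets: 0b1010, Syncnets: 0b0011}
	fmt.Printf("v2 stream gets %+v\n", local)
	fmt.Printf("v1 stream gets %+v\n", downgrade(local))
	fmt.Printf("v0 object promoted for a v2 stream: %+v\n", upgrade(downgrade(local)))
}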
func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata.Metadata, error) {
// sendMetaDataRequest sends a METADATA request to the peer and return the response.
func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (metadata.Metadata, error) {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()

topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
// Compute the current epoch.
currentSlot := s.cfg.clock.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)

// Compute the topic for the metadata request regarding the current epoch.
topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, currentEpoch)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "topic from message")
}
stream, err := s.cfg.p2p.Send(ctx, new(interface{}), topic, id)

// Send the METADATA request to the peer.
message := new(interface{})
stream, err := s.cfg.p2p.Send(ctx, message, topic, peerID)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "send metadata request")
}

defer closeStream(stream, log)

// Read the METADATA response from the peer.
code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, err
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.Wrap(err, "read status code")
}

if code != 0 {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.New(errMsg)
}

// Get the genesis validators root.
valRoot := s.cfg.clock.GenesisValidatorsRoot()
rpcCtx, err := forks.ForkDigestFromEpoch(slots.ToEpoch(s.cfg.clock.CurrentSlot()), valRoot[:])

// Get the fork digest from the current epoch and the genesis validators root.
rpcCtx, err := forks.ForkDigestFromEpoch(currentEpoch, valRoot[:])
if err != nil {
return nil, err
return nil, errors.Wrap(err, "fork digest from epoch")
}

// Instantiate zero value of the metadata.
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "extract data type from type map")
}

// Defensive check to ensure valid objects are being sent.
topicVersion := ""
switch msg.Version() {
@@ -120,12 +171,17 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
case version.Altair:
topicVersion = p2p.SchemaVersionV2
}

// Validate the version of the topic.
if err := validateVersion(topicVersion, stream); err != nil {
return nil, err
}

// Decode the metadata from the peer.
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, err
}

return msg, nil
}
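Every failure path in sendMetaDataRequest now carries a short context string. A minimal illustration of the wrapping pattern, assuming github.com/pkg/errors (which the errors.Wrap calls above suggest):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// fetchTopic is a hypothetical helper standing in for p2p.TopicFromMessage.
func fetchTopic() (string, error) {
	return "", errors.New("unknown message name")
}

func request() error {
	topic, err := fetchTopic()
	if err != nil {
		// The caller sees "topic from message: unknown message name",
		// so the failing step is identifiable without a stack trace.
		return errors.Wrap(err, "topic from message")
	}
	_ = topic
	return nil
}

func main() {
	if err := request(); err != nil {
		fmt.Println(err)
	}
}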
@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
@@ -171,7 +172,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
// Do not resubscribe already seen subscriptions.
ok := s.subHandler.topicExists(topic)
if ok {
log.Debugf("Provided topic already has an active subscription running: %s", topic)
log.WithField("topic", topic).Debug("Provided topic already has an active subscription running")
return nil
}

@@ -188,6 +189,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
log.WithError(err).Error("Could not subscribe topic")
return nil
}

s.subHandler.addTopic(sub.Topic(), sub)

// Pipeline decodes the incoming subscription data, runs the validation, and handles the
@@ -195,6 +197,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
pipeline := func(msg *pubsub.Message) {
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
defer cancel()

ctx, span := trace.StartSpan(ctx, "sync.pubsub")
defer span.End()

@@ -312,8 +315,8 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
"multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
"peerID": pid.String(),
"agent": agentString(pid, s.cfg.p2p.Host()),
"gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
}).Debugf("Gossip message was ignored")
"gossipScore": fmt.Sprintf("%.2f", s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)),
}).Debug("Gossip message was ignored")
}
messageIgnoredValidationCounter.WithLabelValues(topic).Inc()
}
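The ignored-message log now emits the gossip score as a fixed two-decimal string rather than a raw float field, and uses Debug with structured fields instead of Debugf. A small sketch of the difference, assuming github.com/sirupsen/logrus as the logger:

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.SetLevel(logrus.DebugLevel)

	score := 0.123456789

	// A raw float field renders with full precision, which is noisy in log output.
	log.WithField("gossipScore", score).Debug("Gossip message was ignored")

	// Formatting the score up front keeps the field short and stable.
	log.WithField("gossipScore", fmt.Sprintf("%.2f", score)).Debug("Gossip message was ignored")
}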
@@ -368,9 +371,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
}
// Check every slot that there are enough peers
for i := uint64(0); i < subnetCount; i++ {
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", i)
if !s.enoughPeersAreConnected(s.addDigestAndIndexToTopic(topic, digest, i)) {
_, err := s.cfg.p2p.FindPeersWithSubnet(
s.ctx,
s.addDigestAndIndexToTopic(topic, digest, i),
@@ -434,10 +435,8 @@ func (s *Service) subscribeDynamicWithSubnets(
return
}
wantedSubs := s.retrievePersistentSubs(currentSlot)
// Resize as appropriate.
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)

// subscribe desired aggregator subnets.
for _, idx := range wantedSubs {
s.subscribeAggregatorSubnet(subscriptions, idx, digest, validate, handle)
}
@@ -451,9 +450,15 @@ func (s *Service) subscribeDynamicWithSubnets(
}()
}

// revalidate that our currently connected subnets are valid.
func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subscription,
wantedSubs []uint64, topicFormat string, digest [4]byte) {
// reValidateSubscriptions unsubscribe from topics we are currently subscribed to but that are
// not in the list of wanted subnets.
// TODO: Rename this functions as it does not only revalidate subscriptions.
func (s *Service) reValidateSubscriptions(
subscriptions map[uint64]*pubsub.Subscription,
wantedSubs []uint64,
topicFormat string,
digest [4]byte,
) {
for k, v := range subscriptions {
var wanted bool
for _, idx := range wantedSubs {
@@ -462,6 +467,7 @@ func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subsc
break
}
}

if !wanted && v != nil {
v.Cancel()
fullTopic := fmt.Sprintf(topicFormat, digest, k) + s.cfg.p2p.Encoding().ProtocolSuffix()
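reValidateSubscriptions cancels and unsubscribes any topic whose subnet index is no longer wanted. A toy version of that pruning pattern over a map, with a local subscription type standing in for *pubsub.Subscription:

package main

import "fmt"

// subscription stands in for *pubsub.Subscription in this sketch.
type subscription struct{ cancelled bool }

func (s *subscription) cancel() { s.cancelled = true }

// pruneUnwanted cancels and drops every subscription whose index is not in the
// wanted list, mirroring the shape of reValidateSubscriptions.
func pruneUnwanted(subs map[uint64]*subscription, wanted []uint64) {
	wantedSet := make(map[uint64]bool, len(wanted))
	for _, idx := range wanted {
		wantedSet[idx] = true
	}
	for idx, sub := range subs {
		if wantedSet[idx] || sub == nil {
			continue
		}
		sub.cancel()
		delete(subs, idx)
	}
}

func main() {
	subs := map[uint64]*subscription{0: {}, 1: {}, 2: {}}
	pruneUnwanted(subs, []uint64{1})
	fmt.Println(len(subs)) // 1
}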
@@ -487,35 +493,7 @@ func (s *Service) subscribeAggregatorSubnet(
if _, exists := subscriptions[idx]; !exists {
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
}
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
}
}
}

// subscribe missing subnets for our sync committee members.
func (s *Service) subscribeSyncSubnet(
subscriptions map[uint64]*pubsub.Subscription,
idx uint64,
digest [4]byte,
validate wrappedVal,
handle subHandler,
) {
// do not subscribe if we have no peers in the same
// subnet
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]
subnetTopic := fmt.Sprintf(topic, digest, idx)
// check if subscription exists and if not subscribe the relevant subnet.
if _, exists := subscriptions[idx]; !exists {
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
}
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to sync gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
if !s.enoughPeersAreConnected(subnetTopic) {
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
@@ -568,9 +546,7 @@ func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrapped
}
// Check every slot that there are enough peers
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
log.Debugf("No peers found subscribed to sync gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", i)
if !s.enoughPeersAreConnected(s.addDigestAndIndexToTopic(topic, digest, i)) {
_, err := s.cfg.p2p.FindPeersWithSubnet(
s.ctx,
s.addDigestAndIndexToTopic(topic, digest, i),
@@ -588,59 +564,138 @@ func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrapped
}()
}

// subscribe to a dynamically changing list of subnets. This method expects a fmt compatible
// string for the topic name and the list of subnets for subscribed topics that should be
// maintained.
// subscribeToSyncSubnets subscribes to needed sync subnets, unsubscribe from unneeded ones and search for more peers if needed.
// Returns `true` if the digest is valid (wrt. the current epoch), `false` otherwise.
func (s *Service) subscribeToSyncSubnets(
topicFormat string,
digest [4]byte,
genesisValidatorsRoot [fieldparams.RootLength]byte,
genesisTime time.Time,
subscriptions map[uint64]*pubsub.Subscription,
currentSlot primitives.Slot,
validate wrappedVal,
handle subHandler,
) bool {
// Get sync subnets topic.
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]

// Do not subscribe if not synced.
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
return true
}

// Do not subscribe is the digest is not valid.
valid, err := isDigestValid(digest, genesisTime, genesisValidatorsRoot)
if err != nil {
log.Error(err)
return true
}

// Unsubscribe from all subnets if the digest is not valid. It's likely to be the case after a hard fork.
if !valid {
log.WithField("digest", fmt.Sprintf("%#x", digest)).Warn("Sync subnets with this digest are no longer valid, unsubscribing from all of them.")
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
return false
}

// Get the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)

// Retrieve the subnets we want to subscribe to.
wantedSubnetsIndex := s.retrieveActiveSyncSubnets(currentEpoch)

// Remove subscriptions that are no longer wanted.
s.reValidateSubscriptions(subscriptions, wantedSubnetsIndex, topicFormat, digest)

// Subscribe to wanted subnets.
for _, subnetIndex := range wantedSubnetsIndex {
subnetTopic := fmt.Sprintf(topic, digest, subnetIndex)

// Check if subscription exists.
if _, exists := subscriptions[subnetIndex]; exists {
continue
}

// We need to subscribe to the subnet.
subscription := s.subscribeWithBase(subnetTopic, validate, handle)
subscriptions[subnetIndex] = subscription
}

// Find new peers for wanted subnets if needed.
for _, subnetIndex := range wantedSubnetsIndex {
subnetTopic := fmt.Sprintf(topic, digest, subnetIndex)

// Check if we have enough peers in the subnet. Skip if we do.
if s.enoughPeersAreConnected(subnetTopic) {
continue
}

// Not enough peers in the subnet, we need to search for more.
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, subnetIndex, flags.Get().MinimumPeersPerSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
}
}

return true
}
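After pruning and subscribing, subscribeToSyncSubnets walks the wanted subnets a second time and only starts a discovery search where the peer count is below the configured minimum. A toy version of that loop, with hypothetical peersOn and findPeers helpers in place of ListPeers and FindPeersWithSubnet:

package main

import "fmt"

// peersOn and findPeers are hypothetical helpers standing in for
// PubSub().ListPeers and FindPeersWithSubnet in this sketch.
func peersOn(subnet uint64) int { return int(subnet) } // pretend subnet n has n peers

func findPeers(subnet uint64, want int) {
	fmt.Printf("searching for %d peers on subnet %d\n", want, subnet)
}

// ensurePeers only triggers a discovery search for subnets below the threshold.
func ensurePeers(wanted []uint64, minPeers int) {
	for _, subnet := range wanted {
		if peersOn(subnet) >= minPeers {
			continue
		}
		findPeers(subnet, minPeers)
	}
}

func main() {
	ensurePeers([]uint64{0, 1, 2, 3}, 2)
}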
// subscribeDynamicWithSyncSubnets subscribes to a dynamically changing list of subnets.
func (s *Service) subscribeDynamicWithSyncSubnets(
topicFormat string,
validate wrappedVal,
handle subHandler,
digest [4]byte,
) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
// Retrieve the number of committee subnets we need to subscribe to.
syncCommiteeSubnetsCount := params.BeaconConfig().SyncCommitteeSubnetCount

// Initialize the subscriptions map.
subscriptions := make(map[uint64]*pubsub.Subscription, syncCommiteeSubnetsCount)

// Retrieve the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

// Retrieve the epoch of the fork corresponding to the digest.
_, epoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot[:])
if err != nil {
panic(err)
}
base := p2p.GossipTopicMappings(topicFormat, e)

// Retrieve the base protobuf message.
base := p2p.GossipTopicMappings(topicFormat, epoch)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
}
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().SyncCommitteeSubnetCount)
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

// Retrieve the genesis time.
genesisTime := s.cfg.clock.GenesisTime()

// Define a ticker ticking every slot.
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
ticker := slots.NewSlotTicker(genesisTime, secondsPerSlot)

// Retrieve the current slot.
currentSlot := s.cfg.clock.CurrentSlot()

go func() {
// Subscribe to the sync subnets.
s.subscribeToSyncSubnets(topicFormat, digest, genesisValidatorsRoot, genesisTime, subscriptions, currentSlot, validate, handle)

for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case currentSlot := <-ticker.C():
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)
if err != nil {
log.Error(err)
continue
}
if !valid {
log.Warnf("Sync subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
// Unsubscribes from all our current subnets.
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
isDigestValid := s.subscribeToSyncSubnets(topicFormat, digest, genesisValidatorsRoot, genesisTime, subscriptions, currentSlot, validate, handle)

// Stop the ticker if the digest is not valid. Likely to happen after a hard fork.
if !isDigestValid {
ticker.Done()
return
}

wantedSubs := s.retrieveActiveSyncSubnets(slots.ToEpoch(currentSlot))
// Resize as appropriate.
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)

// subscribe desired aggregator subnets.
for _, idx := range wantedSubs {
s.subscribeSyncSubnet(subscriptions, idx, digest, validate, handle)
}
case <-s.ctx.Done():
ticker.Done()
return
}
}
}()
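The rewritten subscribeDynamicWithSyncSubnets runs one subscription pass immediately, repeats it on every slot tick, and tears the ticker down for good once the fork digest stops being valid. A self-contained sketch of that control flow, using a plain time.Ticker in place of prysm's slot ticker:

package main

import (
	"context"
	"fmt"
	"time"
)

// resubscribe stands in for subscribeToSyncSubnets: it returns false once the
// digest it was given is no longer valid, which tells the loop to stop for good.
func runEverySlot(ctx context.Context, slot time.Duration, resubscribe func() bool) {
	ticker := time.NewTicker(slot)
	defer ticker.Stop()

	// Run once immediately, as the rewritten function does before entering the loop.
	if !resubscribe() {
		return
	}

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if !resubscribe() {
				return
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	ticks := 0
	runEverySlot(ctx, 100*time.Millisecond, func() bool {
		ticks++
		fmt.Println("resubscribe pass", ticks)
		return ticks < 3 // pretend the digest becomes invalid after the third pass
	})
}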
@@ -650,9 +705,7 @@ func (s *Service) subscribeDynamicWithSyncSubnets(
func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.Attestation{})]
subnetTopic := fmt.Sprintf(topic, digest, idx)
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
if !s.enoughPeersAreConnected(subnetTopic) {
// perform a search for peers with the desired committee index.
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
if err != nil {
@@ -676,10 +729,15 @@ func (s *Service) unSubscribeFromTopic(topic string) {
}
}

// find if we have peers who are subscribed to the same subnet
func (s *Service) validPeersExist(subnetTopic string) bool {
numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
// enoughPeersAreConnected checks if we have enough peers which are subscribed to the same subnet.
func (s *Service) enoughPeersAreConnected(subnetTopic string) bool {
topic := subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix()
threshold := flags.Get().MinimumPeersPerSubnet

peersWithSubnet := s.cfg.p2p.PubSub().ListPeers(topic)
peersWithSubnetCount := len(peersWithSubnet)

return peersWithSubnetCount >= threshold
}

func (s *Service) retrievePersistentSubs(currSlot primitives.Slot) []uint64 {
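The renamed helper keeps the same contract as validPeersExist: count the pubsub peers on the fully-qualified topic and compare against the configured minimum. A stand-alone version for illustration, with listPeers as a hypothetical stand-in for PubSub().ListPeers:

package main

import "fmt"

// listPeers is a hypothetical stand-in for PubSub().ListPeers in this sketch.
func listPeers(topic string) []string {
	return map[string][]string{
		"/eth2/abcd/sync_committee_1/ssz_snappy": {"peerA", "peerB"},
	}[topic]
}

func enoughPeersAreConnected(subnetTopic, protocolSuffix string, minimum int) bool {
	topic := subnetTopic + protocolSuffix
	return len(listPeers(topic)) >= minimum
}

func main() {
	fmt.Println(enoughPeersAreConnected("/eth2/abcd/sync_committee_1", "/ssz_snappy", 2)) // true
	fmt.Println(enoughPeersAreConnected("/eth2/abcd/sync_committee_2", "/ssz_snappy", 2)) // false
}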
@@ -8,6 +9,9 @@ import (
"google.golang.org/protobuf/proto"
)

// MetadataV0
// ----------

// MetadataV0 is a convenience wrapper around our metadata protobuf object.
type MetadataV0 struct {
md *pb.MetaDataV0
@@ -28,6 +31,11 @@ func (m MetadataV0) AttnetsBitfield() bitfield.Bitvector64 {
return m.md.Attnets
}

// SyncnetsBitfield returns the bitfield stored in the metadata.
func (m MetadataV0) SyncnetsBitfield() bitfield.Bitvector4 {
return bitfield.Bitvector4{0}
}

// InnerObject returns the underlying metadata protobuf structure.
func (m MetadataV0) InnerObject() interface{} {
return m.md
@@ -74,16 +82,19 @@ func (m MetadataV0) MetadataObjV0() *pb.MetaDataV0 {

// MetadataObjV1 returns the inner metadata object in its type
// specified form. If it doesn't exist then we return nothing.
func (_ MetadataV0) MetadataObjV1() *pb.MetaDataV1 {
func (MetadataV0) MetadataObjV1() *pb.MetaDataV1 {
return nil
}

// Version returns the fork version of the underlying object.
func (_ MetadataV0) Version() int {
func (MetadataV0) Version() int {
return version.Phase0
}

// MetadataV1 is a convenience wrapper around our metadata v2 protobuf object.
// MetadataV1
// ----------

// MetadataV1 is a convenience wrapper around our metadata v1 protobuf object.
type MetadataV1 struct {
md *pb.MetaDataV1
}
@@ -103,6 +114,11 @@ func (m MetadataV1) AttnetsBitfield() bitfield.Bitvector64 {
return m.md.Attnets
}

// SyncnetsBitfield returns the bitfield stored in the metadata.
func (m MetadataV1) SyncnetsBitfield() bitfield.Bitvector4 {
return m.md.Syncnets
}

// InnerObject returns the underlying metadata protobuf structure.
func (m MetadataV1) InnerObject() interface{} {
return m.md
@@ -143,7 +159,7 @@ func (m MetadataV1) UnmarshalSSZ(buf []byte) error {

// MetadataObjV0 returns the inner metadata object in its type
// specified form. If it doesn't exist then we return nothing.
func (_ MetadataV1) MetadataObjV0() *pb.MetaDataV0 {
func (MetadataV1) MetadataObjV0() *pb.MetaDataV0 {
return nil
}

@@ -154,6 +170,6 @@ func (m MetadataV1) MetadataObjV1() *pb.MetaDataV1 {
}

// Version returns the fork version of the underlying object.
func (_ MetadataV1) Version() int {
func (MetadataV1) Version() int {
return version.Altair
}

@@ -10,6 +10,7 @@ import (
type Metadata interface {
SequenceNumber() uint64
AttnetsBitfield() bitfield.Bitvector64
SyncnetsBitfield() bitfield.Bitvector4
InnerObject() interface{}
IsNil() bool
Copy() Metadata
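With SyncnetsBitfield added to the Metadata interface, the pre-Altair wrapper satisfies it by returning an all-zero Bitvector4. A compact sketch of that shape, assuming the github.com/prysmaticlabs/go-bitfield package these wrappers already use:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// metadata mirrors just the two bitfield accessors from the interface above.
type metadata interface {
	AttnetsBitfield() bitfield.Bitvector64
	SyncnetsBitfield() bitfield.Bitvector4
}

// phase0Metadata has no syncnets field, so it reports an all-zero bitvector.
type phase0Metadata struct {
	attnets bitfield.Bitvector64
}

func (m phase0Metadata) AttnetsBitfield() bitfield.Bitvector64 { return m.attnets }
func (m phase0Metadata) SyncnetsBitfield() bitfield.Bitvector4 { return bitfield.Bitvector4{0} }

func main() {
	var md metadata = phase0Metadata{attnets: bitfield.NewBitvector64()}
	fmt.Println(md.AttnetsBitfield(), md.SyncnetsBitfield())
}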