Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 21:38:05 -05:00

Compare commits: peerDAS-re...peerdas-no (16 commits)

Commit SHA1s: a7cef2026c, d3e2a3a234, 5f0840f0c3, c2f09c3b8a, 0e83849faf, e04032ae59, b25adabd2c, b9ee4de1a6, a9b0b5dbcd, e34a97a63e, ec35840722, 751a7a00ee, e25161c5d7, d63b6a3b88, 0d5fd5e41c, 8cd37ab17d
@@ -53,7 +53,7 @@ func Start() error {
	}
	if !kzgLoaded {
		// TODO: Provide a configuration option for this.
-		var precompute uint = 0
+		var precompute uint = 8

		// Free the current trusted setup before running this method. CKZG
		// panics if the same setup is run multiple times.
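The hunk above raises the hard-coded CKZG precompute level from 0 to 8 and leaves a TODO for making it configurable. A minimal sketch, assuming a hypothetical --kzg-precompute flag and a stand-in loadTrustedSetup helper (neither is part of the diff), of how that option could be plumbed through:

package main

import (
	"flag"
	"fmt"
	"log"
)

// loadTrustedSetup is a hypothetical stand-in for the CKZG setup loader used in the diff.
// Higher precompute values generally trade memory for faster proof computation.
func loadTrustedSetup(precompute uint) error {
	fmt.Printf("loading trusted setup with precompute=%d\n", precompute)
	return nil
}

func main() {
	// Hypothetical flag; 0 disables precomputation, 8 matches the diff's new default.
	precompute := flag.Uint("kzg-precompute", 8, "CKZG precompute level")
	flag.Parse()

	if err := loadTrustedSetup(*precompute); err != nil {
		log.Fatalf("load trusted setup: %v", err)
	}
}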
@@ -3,6 +3,7 @@ package blockchain
import (
	"context"
	"fmt"
+	"slices"
	"time"

	"github.com/pkg/errors"
@@ -657,9 +658,8 @@ func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, sig
		return errors.Wrap(err, "custody columns")
	}

-	// Expected is the number of custody data columnns a node is expected to have.
-	expected := len(colMap)
-	if expected == 0 {
+	// colMap represents the data columnns a node is expected to custody.
+	if len(colMap) == 0 {
		return nil
	}

@@ -683,14 +683,14 @@ func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, sig
	}

	// Get a map of data column indices that are not currently available.
-	missing, err := missingDataColumns(s.blobStorage, root, colMap)
+	missingMap, err := missingDataColumns(s.blobStorage, root, colMap)
	if err != nil {
		return err
	}

	// If there are no missing indices, all data column sidecars are available.
	// This is the happy path.
-	if len(missing) == 0 {
+	if len(missingMap) == 0 {
		return nil
	}

@@ -698,8 +698,24 @@ func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, sig
	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
	// Avoid logging if DA check is called after next slot start.
	if nextSlot.After(time.Now()) {
+		// Compute sorted slice of expected columns.
+		expected := make([]uint64, 0, len(colMap))
+		for col := range colMap {
+			expected = append(expected, col)
+		}
+
+		slices.Sort[[]uint64](expected)
+
+		// Compute sorted slice of missing columns.
+		missing := make([]uint64, 0, len(missingMap))
+		for col := range missingMap {
+			missing = append(missing, col)
+		}
+
+		slices.Sort[[]uint64](missing)
+
		nst := time.AfterFunc(time.Until(nextSlot), func() {
-			if len(missing) == 0 {
+			if len(missingMap) == 0 {
				return
			}

@@ -707,7 +723,7 @@ func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, sig
				"slot": signed.Block().Slot(),
				"root": fmt.Sprintf("%#x", root),
				"columnsExpected": expected,
-				"columnsWaiting": len(missing),
+				"columnsWaiting": missing,
			}).Error("Still waiting for data columns DA check at slot end.")
		})
		defer nst.Stop()
@@ -722,7 +738,7 @@ func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, sig
			}

			// This is a data column we are expecting.
-			if _, ok := missing[rootIndex.Index]; ok {
+			if _, ok := missingMap[rootIndex.Index]; ok {
				retrievedDataColumnsCount++
			}

@@ -733,15 +749,15 @@ func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, sig
			}

			// Remove the index from the missing map.
-			delete(missing, rootIndex.Index)
+			delete(missingMap, rootIndex.Index)

			// Exit if there is no more missing data columns.
-			if len(missing) == 0 {
+			if len(missingMap) == 0 {
				return nil
			}
		case <-ctx.Done():
-			missingIndexes := make([]uint64, 0, len(missing))
-			for val := range missing {
+			missingIndexes := make([]uint64, 0, len(missingMap))
+			for val := range missingMap {
				copiedVal := val
				missingIndexes = append(missingIndexes, copiedVal)
			}
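The DA-check hunks above rename the map to missingMap and build sorted expected/missing slices from map keys purely for deterministic logging. A small self-contained sketch of that keys-to-sorted-slice pattern (names are illustrative):

package main

import (
	"fmt"
	"slices"
)

// sortedKeys collects the keys of a set-like map into a sorted slice,
// which gives stable, human-readable log output.
func sortedKeys(m map[uint64]bool) []uint64 {
	keys := make([]uint64, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	return keys
}

func main() {
	missing := map[uint64]bool{42: true, 7: true, 19: true}
	fmt.Println(sortedKeys(missing)) // [7 19 42]
}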
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "helpers.go",
        "log.go",
+        "metrics.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
@@ -22,6 +23,8 @@ go_library(
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
+        "@com_github_prometheus_client_golang//prometheus:go_default_library",
+        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
@@ -1,6 +1,7 @@
package peerdas

import (
+	"context"
	"encoding/binary"
	"fmt"
	"math"
@@ -33,7 +34,7 @@ const (
)

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
-type Csc uint8
+type Csc uint64

func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }
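Widening Csc from uint8 to uint64 changes how the custody subnet count is encoded in the node's ENR. For context, a minimal sketch of how a typed entry like this round-trips through go-ethereum's enr package; the record here is unsigned and the "csc" key is written out literally in place of the CustodySubnetCountEnrKey constant:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enr"
)

// Csc mirrors the typed ENR entry in the diff: the Go type carries the value,
// and ENRKey names the key under which it is stored in the record.
type Csc uint64

func (Csc) ENRKey() string { return "csc" }

func main() {
	// Write the entry into a record.
	var record enr.Record
	record.Set(Csc(4))

	// Read it back with enr.WithEntry, as the discovery test in the diff does.
	var count Csc
	if err := record.Load(enr.WithEntry("csc", &count)); err != nil {
		fmt.Println("load:", err)
		return
	}
	fmt.Println("custody subnet count:", count) // custody subnet count: 4
}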
@@ -117,6 +118,7 @@ func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
+	startTime := time.Now()
	blobsCount := len(blobs)
	if blobsCount == 0 {
		return nil, nil
@@ -145,16 +147,24 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs
	}

	// Compute cells and proofs.
-	cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobsCount)
+	cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)

+	eg, _ := errgroup.WithContext(context.Background())
	for i := range blobs {
-		blob := &blobs[i]
-		blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
-		if err != nil {
-			return nil, errors.Wrap(err, "compute cells and KZG proofs")
-		}
+		blobIndex := i
+		eg.Go(func() error {
+			blob := &blobs[blobIndex]
+			blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
+			if err != nil {
+				return errors.Wrap(err, "compute cells and KZG proofs")
+			}

-		cellsAndProofs = append(cellsAndProofs, blobCellsAndProofs)
+			cellsAndProofs[blobIndex] = blobCellsAndProofs
+			return nil
+		})
	}
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}

	// Get the column sidecars.
@@ -196,7 +206,7 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs

		sidecars = append(sidecars, sidecar)
	}

+	dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
	return sidecars, nil
}
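The hunk above parallelizes the per-blob cell-and-proof computation with errgroup, preallocating the result slice by length and writing by index so goroutines never append concurrently. A small self-contained sketch of that fan-out shape, with a trivial square function standing in for the real KZG work:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	inputs := []int{1, 2, 3, 4, 5}

	// Preallocate by length so each goroutine owns exactly one slot;
	// writing results[i] from goroutine i is race-free without a mutex.
	results := make([]int, len(inputs))

	eg, _ := errgroup.WithContext(context.Background())
	for i := range inputs {
		i := i // capture the loop variable (needed before Go 1.22 loop-var semantics)
		eg.Go(func() error {
			results[i] = inputs[i] * inputs[i] // stand-in for the expensive per-blob work
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(results) // [1 4 9 16 25]
}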
beacon-chain/core/peerdas/metrics.go (new file, 14 lines)
@@ -0,0 +1,14 @@
+package peerdas
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var dataColumnComputationTime = promauto.NewHistogram(
+	prometheus.HistogramOpts{
+		Name: "data_column_sidecar_computation_milliseconds",
+		Help: "Captures the time taken to compute data column sidecars from blobs.",
+		Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
+	},
+)
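For reference, a minimal sketch of how a promauto histogram like the one above gets fed: time the operation and observe the elapsed milliseconds. The metric name and work here are illustrative, not taken from the diff:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// computationTime is registered with the default Prometheus registry by promauto.
var computationTime = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_computation_milliseconds", // illustrative name
	Help:    "Time taken by the example computation.",
	Buckets: []float64{100, 250, 500, 1000, 2000, 4000},
})

func compute() {
	start := time.Now()
	defer func() {
		// Observe elapsed time in milliseconds, matching the pattern in the diff.
		computationTime.Observe(float64(time.Since(start).Milliseconds()))
	}()

	time.Sleep(50 * time.Millisecond) // stand-in for real work
}

func main() {
	compute()
}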
@@ -402,7 +402,7 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
}

func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
-	return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
+	return fmt.Sprintf(BeaconAttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
@@ -103,8 +103,8 @@ func TestService_Broadcast_ReturnsErr_TopicNotMapped(t *testing.T) {
}

func TestService_Attestation_Subnet(t *testing.T) {
-	if gtm := GossipTypeMapping[reflect.TypeOf(&ethpb.Attestation{})]; gtm != AttestationSubnetTopicFormat {
-		t.Errorf("Constant is out of date. Wanted %s, got %s", AttestationSubnetTopicFormat, gtm)
+	if gtm := GossipTypeMapping[reflect.TypeOf(&ethpb.Attestation{})]; gtm != BeaconAttestationSubnetTopicFormat {
+		t.Errorf("Constant is out of date. Wanted %s, got %s", BeaconAttestationSubnetTopicFormat, gtm)
	}

	tests := []struct {
@@ -170,7 +170,7 @@ func TestService_BroadcastAttestation(t *testing.T) {
	msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
	subnet := uint64(5)

-	topic := AttestationSubnetTopicFormat
+	topic := BeaconAttestationSubnetTopicFormat
	GossipTypeMapping[reflect.TypeOf(msg)] = topic
	digest, err := p.currentForkDigest()
	require.NoError(t, err)
@@ -332,7 +332,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
	go p2.listenForNewNodes()

	msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
-	topic := AttestationSubnetTopicFormat
+	topic := BeaconAttestationSubnetTopicFormat
	GossipTypeMapping[reflect.TypeOf(msg)] = topic
	digest, err := p.currentForkDigest()
	require.NoError(t, err)
@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
	// Disallow bad peers from dialing in.
-	if s.peers.IsBad(pid) {
+	if s.peers.Status(pid).IsBad {
		return false
	}
	return filterConnections(s.addrFilter, m)
@@ -72,26 +72,10 @@ loop:
	return validPeers, nil
}

-// CustodyCountFromRemotePeer retrieves the custody count from a remote peer.
-func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
+func (s *Service) custodyCountFromRemotePeerEnr(pid peer.ID) uint64 {
	// By default, we assume the peer custodies the minimum number of subnets.
	custodyRequirement := params.BeaconConfig().CustodyRequirement

-	// First, try to get the custody count from the peer's metadata.
-	metadata, err := s.peers.Metadata(pid)
-	if err != nil {
-		log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
-	}
-
-	if metadata != nil {
-		custodyCount := metadata.CustodySubnetCount()
-		if custodyCount > 0 {
-			return uint64(custodyCount)
-		}
-	}
-
-	log.WithField("peerID", pid).Debug("Failed to retrieve custody count from metadata for peer, defaulting to the ENR value")
-
	// Retrieve the ENR of the peer.
	record, err := s.peers.ENR(pid)
	if err != nil {
@@ -116,3 +100,30 @@ func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {

	return custodyCount
}
+
+// CustodyCountFromRemotePeer retrieves the custody count from a remote peer.
+func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
+	// Try to get the custody count from the peer's metadata.
+	metadata, err := s.peers.Metadata(pid)
+	if err != nil {
+		log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
+		return s.custodyCountFromRemotePeerEnr(pid)
+	}
+
+	// If the metadata is nil, default to the ENR value.
+	if metadata == nil {
+		log.WithField("peerID", pid).Debug("Metadata is nil, defaulting to the ENR value")
+		return s.custodyCountFromRemotePeerEnr(pid)
+	}
+
+	// Get the custody subnets count from the metadata.
+	custodyCount := metadata.CustodySubnetCount()
+
+	// If the custody count is null, default to the ENR value.
+	if custodyCount == 0 {
+		log.WithField("peerID", pid).Debug("The custody count extracted from the metadata equals to 0, defaulting to the ENR value")
+		return s.custodyCountFromRemotePeerEnr(pid)
+	}
+
+	return custodyCount
+}
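The refactor above splits the lookup into a metadata path with an ENR fallback. A minimal sketch of that fallback shape, with hypothetical lookup functions standing in for the peer-store metadata and ENR reads (none of these names are real Prysm APIs):

package main

import (
	"errors"
	"fmt"
)

// custodyFromMetadata and custodyFromENR are hypothetical stand-ins for the
// lookups performed in the diff.
func custodyFromMetadata(peerID string) (uint64, error) {
	return 0, errors.New("metadata not available")
}

func custodyFromENR(peerID string) uint64 {
	// The ENR path always returns something, defaulting to a minimum requirement.
	return 4
}

// custodyCount prefers the metadata value and falls back to the ENR whenever
// the metadata is missing or reports zero, mirroring the structure of the diff.
func custodyCount(peerID string) uint64 {
	count, err := custodyFromMetadata(peerID)
	if err != nil || count == 0 {
		return custodyFromENR(peerID)
	}
	return count
}

func main() {
	fmt.Println(custodyCount("peer1")) // 4
}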
@@ -121,12 +121,12 @@ func TestCustodyCountFromRemotePeer(t *testing.T) {

	// Define a metadata with zero custody.
	zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
-		CustodySubnetCount: []byte{0},
+		CustodySubnetCount: 0,
	})

	// Define a nominal metadata.
	nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
-		CustodySubnetCount: []byte{uint8(expectedMetadata)},
+		CustodySubnetCount: expectedMetadata,
	})

	testCases := []struct {
@@ -119,7 +119,7 @@ func (s *Service) RefreshPersistentSubnets() {
		s.updateSubnetRecordWithMetadata(bitV)

		// Ping all peers.
-		s.pingPeers()
+		s.pingPeersAndLogEnr()

		return
	}
@@ -157,7 +157,7 @@ func (s *Service) RefreshPersistentSubnets() {
		s.updateSubnetRecordWithMetadataV2(bitV, bitS)

		// Ping all peers to inform them of new metadata
-		s.pingPeers()
+		s.pingPeersAndLogEnr()

		return
	}
@@ -175,7 +175,7 @@ func (s *Service) RefreshPersistentSubnets() {
	// Get the custody subnet count in our metadata.
	inMetadataCustodySubnetCount := s.Metadata().CustodySubnetCount()

-	isCustodySubnetCountUpToDate := custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == uint64(inMetadataCustodySubnetCount)
+	isCustodySubnetCountUpToDate := (custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == inMetadataCustodySubnetCount)

	if isBitVUpToDate && isBitSUpToDate && isCustodySubnetCountUpToDate {
		// Nothing to do, return early.
@@ -186,7 +186,7 @@ func (s *Service) RefreshPersistentSubnets() {
	s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodySubnetCount)

	// Ping all peers.
-	s.pingPeers()
+	s.pingPeersAndLogEnr()
}

// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
@@ -427,7 +427,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
	}

	// Ignore bad nodes.
-	if s.peers.IsBad(peerData.ID) {
+	if s.peers.Status(peerData.ID).IsBad {
		return false
	}

@@ -473,7 +473,7 @@ type check struct {
	metadataSequenceNumber uint64
	attestationSubnets     []uint64
	syncSubnets            []uint64
-	custodySubnetCount     *uint8
+	custodySubnetCount     *uint64
}

func checkPingCountCacheMetadataRecord(
@@ -542,7 +542,7 @@ func checkPingCountCacheMetadataRecord(

	if expected.custodySubnetCount != nil {
		// Check custody subnet count in ENR.
-		var actualCustodySubnetCount uint8
+		var actualCustodySubnetCount uint64
		err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, &actualCustodySubnetCount))
		require.NoError(t, err)
		require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCount)
@@ -565,7 +565,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
		eip7594ForkEpoch = 10
	)

-	custodySubnetCount := uint8(params.BeaconConfig().CustodyRequirement)
+	custodySubnetCount := params.BeaconConfig().CustodyRequirement

	// Set up epochs.
	defaultCfg := params.BeaconConfig()
@@ -12,24 +12,24 @@ import (
// gossipTopicMappings represent the protocol ID to protobuf message type map for easy
// lookup.
var gossipTopicMappings = map[string]func() proto.Message{
-	BlockSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBeaconBlock{} },
-	AttestationSubnetTopicFormat: func() proto.Message { return &ethpb.Attestation{} },
-	ExitSubnetTopicFormat: func() proto.Message { return &ethpb.SignedVoluntaryExit{} },
-	ProposerSlashingSubnetTopicFormat: func() proto.Message { return &ethpb.ProposerSlashing{} },
-	AttesterSlashingSubnetTopicFormat: func() proto.Message { return &ethpb.AttesterSlashing{} },
-	AggregateAndProofSubnetTopicFormat: func() proto.Message { return &ethpb.SignedAggregateAttestationAndProof{} },
-	SyncContributionAndProofSubnetTopicFormat: func() proto.Message { return &ethpb.SignedContributionAndProof{} },
-	SyncCommitteeSubnetTopicFormat: func() proto.Message { return &ethpb.SyncCommitteeMessage{} },
-	BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBLSToExecutionChange{} },
-	BlobSubnetTopicFormat: func() proto.Message { return &ethpb.BlobSidecar{} },
-	DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
+	BeaconBlockSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBeaconBlock{} },
+	BeaconAttestationSubnetTopicFormat: func() proto.Message { return &ethpb.Attestation{} },
+	VoluntaryExitSubnetTopicFormat: func() proto.Message { return &ethpb.SignedVoluntaryExit{} },
+	ProposerSlashingSubnetTopicFormat: func() proto.Message { return &ethpb.ProposerSlashing{} },
+	AttesterSlashingSubnetTopicFormat: func() proto.Message { return &ethpb.AttesterSlashing{} },
+	BeaconAggregateAndProofSubnetTopicFormat: func() proto.Message { return &ethpb.SignedAggregateAttestationAndProof{} },
+	SyncCommitteeContributionAndProofSubnetTopicFormat: func() proto.Message { return &ethpb.SignedContributionAndProof{} },
+	SyncCommitteeSubnetTopicFormat: func() proto.Message { return &ethpb.SyncCommitteeMessage{} },
+	BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return &ethpb.SignedBLSToExecutionChange{} },
+	BlobSubnetTopicFormat: func() proto.Message { return &ethpb.BlobSidecar{} },
+	DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
}

// GossipTopicMappings is a function to return the assigned data type
// versioned by epoch.
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
	switch topic {
-	case BlockSubnetTopicFormat:
+	case BeaconBlockSubnetTopicFormat:
		if epoch >= params.BeaconConfig().ElectraForkEpoch {
			return &ethpb.SignedBeaconBlockElectra{}
		}
@@ -46,7 +46,7 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
			return &ethpb.SignedBeaconBlockAltair{}
		}
		return gossipMessage(topic)
-	case AttestationSubnetTopicFormat:
+	case BeaconAttestationSubnetTopicFormat:
		if epoch >= params.BeaconConfig().ElectraForkEpoch {
			return &ethpb.AttestationElectra{}
		}
@@ -56,7 +56,7 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
			return &ethpb.AttesterSlashingElectra{}
		}
		return gossipMessage(topic)
-	case AggregateAndProofSubnetTopicFormat:
+	case BeaconAggregateAndProofSubnetTopicFormat:
		if epoch >= params.BeaconConfig().ElectraForkEpoch {
			return &ethpb.SignedAggregateAttestationAndProofElectra{}
		}
@@ -93,16 +93,16 @@ func init() {
		GossipTypeMapping[reflect.TypeOf(v())] = k
	}
	// Specially handle Altair objects.
-	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BeaconBlockSubnetTopicFormat
	// Specially handle Bellatrix objects.
-	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BeaconBlockSubnetTopicFormat
	// Specially handle Capella objects.
-	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockCapella{})] = BlockSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockCapella{})] = BeaconBlockSubnetTopicFormat
	// Specially handle Deneb objects.
-	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BeaconBlockSubnetTopicFormat
	// Specially handle Electra objects.
-	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
-	GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = AttestationSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BeaconBlockSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = BeaconAttestationSubnetTopicFormat
	GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
-	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = BeaconAggregateAndProofSubnetTopicFormat
}
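The mapping above is a registry from topic-format constants to message constructors, with an epoch-aware lookup layered on top. A simplified, self-contained sketch of that shape, using plain strings and any in place of the real topic constants and protobuf types:

package main

import "fmt"

// Simplified message types standing in for the protobuf messages in the diff.
type SignedBeaconBlock struct{}
type SignedBeaconBlockElectra struct{}

const beaconBlockTopicFormat = "/eth2/%x/beacon_block" // illustrative constant

// topicConstructors maps a topic format to a constructor for its default message type.
var topicConstructors = map[string]func() any{
	beaconBlockTopicFormat: func() any { return &SignedBeaconBlock{} },
}

// messageForTopic returns the message type for a topic, letting the epoch
// override the default; this mirrors the structure of GossipTopicMappings.
func messageForTopic(topic string, epoch, electraEpoch uint64) any {
	if topic == beaconBlockTopicFormat && epoch >= electraEpoch {
		return &SignedBeaconBlockElectra{}
	}
	if ctor, ok := topicConstructors[topic]; ok {
		return ctor()
	}
	return nil
}

func main() {
	fmt.Printf("%T\n", messageForTopic(beaconBlockTopicFormat, 100, 50)) // *main.SignedBeaconBlockElectra
	fmt.Printf("%T\n", messageForTopic(beaconBlockTopicFormat, 10, 50))  // *main.SignedBeaconBlock
}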
@@ -44,86 +44,86 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
	params.OverrideBeaconConfig(bCfg)

	// Phase 0
-	pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
+	pMessage := GossipTopicMappings(BeaconBlockSubnetTopicFormat, 0)
	_, ok := pMessage.(*ethpb.SignedBeaconBlock)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, 0)
+	pMessage = GossipTopicMappings(BeaconAttestationSubnetTopicFormat, 0)
	_, ok = pMessage.(*ethpb.Attestation)
	assert.Equal(t, true, ok)
	pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
	_, ok = pMessage.(*ethpb.AttesterSlashing)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
+	pMessage = GossipTopicMappings(BeaconAggregateAndProofSubnetTopicFormat, 0)
	_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
	assert.Equal(t, true, ok)

	// Altair Fork
-	pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
+	pMessage = GossipTopicMappings(BeaconBlockSubnetTopicFormat, altairForkEpoch)
	_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, altairForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAttestationSubnetTopicFormat, altairForkEpoch)
	_, ok = pMessage.(*ethpb.Attestation)
	assert.Equal(t, true, ok)
	pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
	_, ok = pMessage.(*ethpb.AttesterSlashing)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAggregateAndProofSubnetTopicFormat, altairForkEpoch)
	_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
	assert.Equal(t, true, ok)

	// Bellatrix Fork
-	pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
+	pMessage = GossipTopicMappings(BeaconBlockSubnetTopicFormat, bellatrixForkEpoch)
	_, ok = pMessage.(*ethpb.SignedBeaconBlockBellatrix)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, bellatrixForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAttestationSubnetTopicFormat, bellatrixForkEpoch)
	_, ok = pMessage.(*ethpb.Attestation)
	assert.Equal(t, true, ok)
	pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
	_, ok = pMessage.(*ethpb.AttesterSlashing)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
	_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
	assert.Equal(t, true, ok)

	// Capella Fork
-	pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
+	pMessage = GossipTopicMappings(BeaconBlockSubnetTopicFormat, capellaForkEpoch)
	_, ok = pMessage.(*ethpb.SignedBeaconBlockCapella)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, capellaForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAttestationSubnetTopicFormat, capellaForkEpoch)
	_, ok = pMessage.(*ethpb.Attestation)
	assert.Equal(t, true, ok)
	pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
	_, ok = pMessage.(*ethpb.AttesterSlashing)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAggregateAndProofSubnetTopicFormat, capellaForkEpoch)
	_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
	assert.Equal(t, true, ok)

	// Deneb Fork
-	pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
+	pMessage = GossipTopicMappings(BeaconBlockSubnetTopicFormat, denebForkEpoch)
	_, ok = pMessage.(*ethpb.SignedBeaconBlockDeneb)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, denebForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAttestationSubnetTopicFormat, denebForkEpoch)
	_, ok = pMessage.(*ethpb.Attestation)
	assert.Equal(t, true, ok)
	pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
	_, ok = pMessage.(*ethpb.AttesterSlashing)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAggregateAndProofSubnetTopicFormat, denebForkEpoch)
	_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
	assert.Equal(t, true, ok)

	// Electra Fork
-	pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
+	pMessage = GossipTopicMappings(BeaconBlockSubnetTopicFormat, electraForkEpoch)
	_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAttestationSubnetTopicFormat, electraForkEpoch)
	_, ok = pMessage.(*ethpb.AttestationElectra)
	assert.Equal(t, true, ok)
	pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
	_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
	assert.Equal(t, true, ok)
-	pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
+	pMessage = GossipTopicMappings(BeaconAggregateAndProofSubnetTopicFormat, electraForkEpoch)
	_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
	assert.Equal(t, true, ok)
}
@@ -22,7 +22,46 @@ const (
)

func peerMultiaddrString(conn network.Conn) string {
-	return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
+	remoteMultiaddr := conn.RemoteMultiaddr().String()
+	remotePeerID := conn.RemotePeer().String()
+	return fmt.Sprintf("%s/p2p/%s", remoteMultiaddr, remotePeerID)
}

+func (s *Service) disconnectFromPeer(
+	conn network.Conn,
+	goodByeFunc func(ctx context.Context, id peer.ID) error,
+	details map[string]interface{},
+) {
+	// Get the remote peer ID.
+	remotePeerID := conn.RemotePeer()
+
+	// Get the direction of the connection.
+	direction := conn.Stat().Direction.String()
+
+	// Get the remote peer multiaddr.
+	remotePeerMultiAddr := peerMultiaddrString(conn)
+
+	// Set the peer to disconnecting state.
+	s.peers.SetConnectionState(remotePeerID, peers.PeerDisconnecting)
+
+	// Only attempt a goodbye if we are still connected to the peer.
+	if s.host.Network().Connectedness(remotePeerID) == network.Connected {
+		if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
+			log.WithError(err).Error("Unable to disconnect from peer")
+		}
+	}
+
+	// Get the remaining active peers.
+	activePeerCount := len(s.peers.Active())
+
+	log = log.WithFields(logrus.Fields{
+		"multiaddr": remotePeerMultiAddr,
+		"direction": direction,
+		"remainingActivePeers": activePeerCount,
+	})
+
+	log.WithFields(details).Debug("Peer disconnected")
+	s.peers.SetConnectionState(remotePeerID, peers.PeerDisconnected)
+}
+
// AddConnectionHandler adds a callback function which handles the connection with a
@@ -59,16 +98,7 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
	s.host.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(net network.Network, conn network.Conn) {
			remotePeer := conn.RemotePeer()
-			disconnectFromPeer := func() {
-				s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
-				// Only attempt a goodbye if we are still connected to the peer.
-				if s.host.Network().Connectedness(remotePeer) == network.Connected {
-					if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
-						log.WithError(err).Error("Unable to disconnect from peer")
-					}
-				}
-				s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
-			}

			// Connection handler must be non-blocking as part of libp2p design.
			go func() {
				if peerHandshaking(remotePeer) {
@@ -77,24 +107,27 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
					return
				}
				defer peerFinished(remotePeer)

				// Handle the various pre-existing conditions that will result in us not handshaking.
				peerConnectionState, err := s.peers.ConnectionState(remotePeer)
				if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
					log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
					return
				}

				s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)

				// Defensive check in the event we still get a bad peer.
-				if s.peers.IsBad(remotePeer) {
-					log.WithField("reason", "bad peer").Trace("Ignoring connection request")
-					disconnectFromPeer()
+				if status := s.peers.Status(remotePeer); status.IsBad {
+					s.disconnectFromPeer(conn, goodByeFunc, status.Details)
					return
				}

				validPeerConnection := func() {
					s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
					// Go through the handshake process.
					log.WithFields(logrus.Fields{
-						"direction": conn.Stat().Direction,
+						"direction": conn.Stat().Direction.String(),
						"multiAddr": peerMultiaddrString(conn),
						"activePeers": len(s.peers.Active()),
					}).Debug("Peer connected")
@@ -117,30 +150,62 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
				// If peer hasn't sent a status request, we disconnect with them
				if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
					statusMessageMissing.Inc()
-					disconnectFromPeer()
+
+					details := map[string]interface{}{
+						"in": "ConnectedF",
+						"chainStateError": err,
+					}
+
+					s.disconnectFromPeer(conn, goodByeFunc, details)
					return
				}

				if peerExists {
					updated, err := s.peers.ChainStateLastUpdated(remotePeer)
					if err != nil {
-						disconnectFromPeer()
+						details := map[string]interface{}{
+							"in": "ConnectedF",
+							"chainStateLastUpdatedError": err,
+						}
+
+						s.disconnectFromPeer(conn, goodByeFunc, details)
						return
					}
-					// exit if we don't receive any current status messages from
-					// peer.
-					if updated.IsZero() || !updated.After(currentTime) {
-						disconnectFromPeer()
+
+					// Exit if we don't receive any current status messages from peer.
+					if updated.IsZero() {
+						details := map[string]interface{}{
+							"in": "ConnectedF",
+							"reason": "Updated is zero",
+						}
+
+						s.disconnectFromPeer(conn, goodByeFunc, details)
						return
					}
+
+					if !updated.After(currentTime) {
+						details := map[string]interface{}{
+							"in": "ConnectedF",
+							"reason": "Did not update",
+						}
+
+						s.disconnectFromPeer(conn, goodByeFunc, details)
+						return
+					}
				}

				validPeerConnection()
				return
			}

			s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
			if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
				log.WithError(err).Trace("Handshake failed")
-				disconnectFromPeer()
+				details := map[string]interface{}{
+					"in": "ConnectedF",
+					"reqFuncErr": err,
+				}
+
+				s.disconnectFromPeer(conn, goodByeFunc, details)
				return
			}
			validPeerConnection()
@@ -149,31 +214,45 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
	})
}

-// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
+// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
	s.host.Network().Notify(&network.NotifyBundle{
		DisconnectedF: func(net network.Network, conn network.Conn) {
-			log := log.WithField("multiAddr", peerMultiaddrString(conn))
+			remotePeerMultiAddr := peerMultiaddrString(conn)
+			peerID := conn.RemotePeer()
+			direction := conn.Stat().Direction.String()
+
+			log := log.WithFields(logrus.Fields{
+				"multiAddr": remotePeerMultiAddr,
+				"direction": direction,
+				"from": "DisconnectedF",
+			})
+
			// Must be handled in a goroutine as this callback cannot be blocking.
			go func() {
				// Exit early if we are still connected to the peer.
-				if net.Connectedness(conn.RemotePeer()) == network.Connected {
+				if net.Connectedness(peerID) == network.Connected {
					return
				}
-				priorState, err := s.peers.ConnectionState(conn.RemotePeer())
+
+				priorState, err := s.peers.ConnectionState(peerID)
				if err != nil {
					// Can happen if the peer has already disconnected, so...
					priorState = peers.PeerDisconnected
				}
-				s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
+
+				s.peers.SetConnectionState(peerID, peers.PeerDisconnecting)
				if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
					log.WithError(err).Error("Disconnect handler failed")
				}
-				s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
+
+				s.peers.SetConnectionState(peerID, peers.PeerDisconnected)
+
				// Only log disconnections if we were fully connected.
				if priorState == peers.PeerConnected {
-					log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
+					activePeersCount := len(s.peers.Active())
+					log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
				}
			}()
		},
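The ConnectedF/DisconnectedF changes above hang off libp2p's notification bundle. A minimal, self-contained sketch of registering such handlers on a host, independent of Prysm's peer store and handshake logic:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/network"
)

func main() {
	host, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer host.Close()

	// NotifyBundle lets us register only the callbacks we care about.
	host.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(_ network.Network, conn network.Conn) {
			// Callbacks must not block, so real handlers (like the one in the
			// diff) immediately hand off to a goroutine.
			go fmt.Printf("connected to %s (%s)\n", conn.RemotePeer(), conn.Stat().Direction)
		},
		DisconnectedF: func(_ network.Network, conn network.Conn) {
			go fmt.Printf("disconnected from %s\n", conn.RemotePeer())
		},
	})

	select {} // keep the process alive so notifications can fire
}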
@@ -21,7 +21,7 @@ func TestMsgID_HashesCorrectly(t *testing.T) {
	genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
	d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
	assert.NoError(t, err)
-	tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
+	tpc := fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, d)
	invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
	pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
	hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
@@ -41,7 +41,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
	genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
	d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
	assert.NoError(t, err)
-	tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
+	tpc := fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, d)
	topicLen := uint64(len(tpc))
	topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
	invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
@@ -71,7 +71,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
	genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
	d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
	assert.NoError(t, err)
-	tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
+	tpc := fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, d)
	topicLen := uint64(len(tpc))
	topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
	invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
@@ -61,7 +61,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
-	if s.isBadPeerNoLock(pid) {
+	if peerStatus := s.statusNoLock(pid); peerStatus.IsBad {
		return BadPeerScore
	}
	score := float64(0)
@@ -114,20 +114,34 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
	peerData.BadResponses++
}

-// IsBadPeer states if the peer is to be considered bad.
+// Status states if the peer is to be considered bad.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
+func (s *BadResponsesScorer) Status(pid peer.ID) PeerStatus {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.isBadPeerNoLock(pid)
+	return s.statusNoLock(pid)
}

-// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
+// statusNoLock is lock-free version of IsBadPeer.
+func (s *BadResponsesScorer) statusNoLock(pid peer.ID) PeerStatus {
	if peerData, ok := s.store.PeerData(pid); ok {
-		return peerData.BadResponses >= s.config.Threshold
+		peerBadResponses := peerData.BadResponses
+		badResponsesThreshold := s.config.Threshold
+
+		if peerBadResponses >= badResponsesThreshold {
+			details := map[string]interface{}{
+				"reason": "Too many bad responses",
+				"badResponses": peerBadResponses,
+				"threshold": badResponsesThreshold,
+			}
+
+			return PeerStatus{IsBad: true, Details: details}
+		}
+
+		return PeerStatus{IsBad: false}
	}
-	return false
+
+	return PeerStatus{IsBad: false}
}

// BadPeers returns the peers that are considered bad.
@@ -137,7 +151,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.isBadPeerNoLock(pid) {
+		if s.statusNoLock(pid).IsBad {
			badPeers = append(badPeers, pid)
		}
	}
@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
	assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

	scorer.Increment(pid)
-	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, false, scorer.Status(pid).IsBad)
	assert.Equal(t, -2.5, scorer.Score(pid))

	scorer.Increment(pid)
-	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, false, scorer.Status(pid).IsBad)
	assert.Equal(t, float64(-5), scorer.Score(pid))

	scorer.Increment(pid)
-	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, false, scorer.Status(pid).IsBad)
	assert.Equal(t, float64(-7.5), scorer.Score(pid))

	scorer.Increment(pid)
-	assert.Equal(t, true, scorer.IsBadPeer(pid))
+	assert.Equal(t, true, scorer.Status(pid).IsBad)
	assert.Equal(t, -100.0, scorer.Score(pid))
}

@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()
	pid := peer.ID("peer1")
-	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, false, scorer.Status(pid).IsBad)

	peerStatuses.Add(nil, pid, nil, network.DirUnknown)
-	assert.Equal(t, false, scorer.IsBadPeer(pid))
+	assert.Equal(t, false, scorer.Status(pid).IsBad)

	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
		scorer.Increment(pid)
		if i == scorers.DefaultBadResponsesThreshold-1 {
-			assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
+			assert.Equal(t, true, scorer.Status(pid).IsBad, "Unexpected peer status")
		} else {
-			assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
+			assert.Equal(t, false, scorer.Status(pid).IsBad, "Unexpected peer status")
		}
	}
}
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
		scorer.Increment(pids[2])
		scorer.Increment(pids[4])
	}
-	assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
-	assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
-	assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
-	assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
-	assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
+	assert.Equal(t, false, scorer.Status(pids[0]).IsBad, "Invalid peer status")
+	assert.Equal(t, true, scorer.Status(pids[1]).IsBad, "Invalid peer status")
+	assert.Equal(t, true, scorer.Status(pids[2]).IsBad, "Invalid peer status")
+	assert.Equal(t, false, scorer.Status(pids[3]).IsBad, "Invalid peer status")
+	assert.Equal(t, true, scorer.Status(pids[4]).IsBad, "Invalid peer status")
	want := []peer.ID{pids[1], pids[2], pids[4]}
	badPeers := scorer.BadPeers()
	sort.Slice(badPeers, func(i, j int) bool {
@@ -173,12 +173,12 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
	return 0
}

-// IsBadPeer states if the peer is to be considered bad.
+// Status states if the peer is to be considered bad.
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
-func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
-	return false
+func (*BlockProviderScorer) Status(_ peer.ID) PeerStatus {
+	return PeerStatus{IsBad: false}
}

// BadPeers returns the peers that are considered bad.
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
	})
	scorer := peerStatuses.Scorers().BlockProviderScorer()

-	assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
+	assert.Equal(t, false, scorer.Status("peer1").IsBad, "Unexpected status for unregistered peer")
	scorer.IncrementProcessedBlocks("peer1", 64)
-	assert.Equal(t, false, scorer.IsBadPeer("peer1"))
+	assert.Equal(t, false, scorer.Status("peer1").IsBad)
	assert.Equal(t, 0, len(scorer.BadPeers()))
}
@@ -50,20 +50,34 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
	return peerData.GossipScore
}

-// IsBadPeer states if the peer is to be considered bad.
-func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
+// Status states if the peer is to be considered bad.
+func (s *GossipScorer) Status(pid peer.ID) PeerStatus {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.isBadPeerNoLock(pid)
+	return s.statusNoLock(pid)
}

-// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
+// statusNoLock is lock-free version of IsBadPeer.
+func (s *GossipScorer) statusNoLock(pid peer.ID) PeerStatus {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
-		return false
+		return PeerStatus{IsBad: false}
	}
-	return peerData.GossipScore < gossipThreshold
+
+	peerGossipScore := peerData.GossipScore
+	isBad := peerGossipScore < gossipThreshold
+
+	if !isBad {
+		return PeerStatus{IsBad: false}
+	}
+
+	return PeerStatus{
+		IsBad: true,
+		Details: map[string]interface{}{
+			"reason": "gossip score below threshold",
+			"score": peerGossipScore,
+			"threshold": gossipThreshold,
+		}}
}

// BadPeers returns the peers that are considered bad.
@@ -73,7 +87,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.isBadPeerNoLock(pid) {
+		if s.statusNoLock(pid).IsBad {
			badPeers = append(badPeers, pid)
		}
	}
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
			},
			check: func(scorer *scorers.GossipScorer) {
				assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
-				assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
+				assert.Equal(t, true, scorer.Status("peer1").IsBad, "Unexpected good peer")
			},
		},
		{
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
			},
			check: func(scorer *scorers.GossipScorer) {
				assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
-				assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
+				assert.Equal(t, false, scorer.Status("peer1").IsBad, "Unexpected bad peer")
				_, _, topicMap, err := scorer.GossipData("peer1")
				assert.NoError(t, err)
				assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
-	if s.isBadPeerNoLock(pid) {
+	if s.statusNoLock(pid).IsBad {
		return BadPeerScore
	}
	score := float64(0)
@@ -66,31 +66,34 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
	return score
}

-// IsBadPeer states if the peer is to be considered bad.
-func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
+// Status states if the peer is to be considered bad.
+func (s *PeerStatusScorer) Status(pid peer.ID) PeerStatus {
	s.store.RLock()
	defer s.store.RUnlock()
-	return s.isBadPeerNoLock(pid)
+	return s.statusNoLock(pid)
}

-// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
+// statusNoLock is lock-free version of Status.
+func (s *PeerStatusScorer) statusNoLock(pid peer.ID) PeerStatus {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
-		return false
+		return PeerStatus{IsBad: false}
	}

	// Mark peer as bad, if the latest error is one of the terminal ones.
	terminalErrs := []error{
		p2ptypes.ErrWrongForkDigestVersion,
		p2ptypes.ErrInvalidFinalizedRoot,
		p2ptypes.ErrInvalidRequest,
	}

	for _, err := range terminalErrs {
		if errors.Is(peerData.ChainStateValidationError, err) {
-			return true
+			return PeerStatus{IsBad: true, Details: map[string]interface{}{"error": err.Error()}}
		}
	}
-	return false
+
+	return PeerStatus{IsBad: false}
}

// BadPeers returns the peers that are considered bad.
@@ -100,7 +103,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.isBadPeerNoLock(pid) {
+		if s.statusNoLock(pid).IsBad {
			badPeers = append(badPeers, pid)
		}
	}
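The statusNoLock change above keeps the existing terminal-error check and only changes what is returned. A small self-contained sketch of that errors.Is-over-a-list pattern, with sentinel errors standing in for the p2ptypes errors:

package main

import (
	"errors"
	"fmt"
)

// Sentinel errors standing in for the p2ptypes errors used in the diff.
var (
	errWrongForkDigest = errors.New("wrong fork digest version")
	errInvalidRequest  = errors.New("invalid request")
)

// isTerminal reports whether err wraps one of the terminal errors,
// the same shape as the loop over terminalErrs in the scorer.
func isTerminal(err error) bool {
	terminal := []error{errWrongForkDigest, errInvalidRequest}
	for _, target := range terminal {
		if errors.Is(err, target) {
			return true
		}
	}
	return false
}

func main() {
	wrapped := fmt.Errorf("handshake failed: %w", errWrongForkDigest)
	fmt.Println(isTerminal(wrapped))               // true
	fmt.Println(isTerminal(errors.New("timeout"))) // false
}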
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
		ScorerParams: &scorers.Config{},
	})
	pid := peer.ID("peer1")
-	assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
-	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+	assert.Equal(t, false, peerStatuses.Scorers().Status(pid).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().Status(pid).IsBad)

	peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-	assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
-	assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+	assert.Equal(t, true, peerStatuses.Scorers().Status(pid).IsBad)
+	assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().Status(pid).IsBad)
}

func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
	pid1 := peer.ID("peer1")
	pid2 := peer.ID("peer2")
	pid3 := peer.ID("peer3")
-	assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
-	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-	assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
-	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-	assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
-	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+	assert.Equal(t, false, peerStatuses.Scorers().Status(pid1).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().Status(pid1).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().Status(pid2).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().Status(pid2).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().Status(pid3).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().Status(pid3).IsBad)

	peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
	peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
	peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-	assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
-	assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-	assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
-	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-	assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
-	assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+	assert.Equal(t, true, peerStatuses.Scorers().Status(pid1).IsBad)
+	assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().Status(pid1).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().Status(pid2).IsBad)
+	assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().Status(pid2).IsBad)
+	assert.Equal(t, true, peerStatuses.Scorers().Status(pid3).IsBad)
+	assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().Status(pid3).IsBad)
	assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
	assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
@@ -24,7 +24,7 @@ const BadPeerScore = gossipThreshold
// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
	Score(pid peer.ID) float64
-	IsBadPeer(pid peer.ID) bool
+	Status(pid peer.ID) PeerStatus
	BadPeers() []peer.ID
}

@@ -49,6 +49,12 @@ type Config struct {
	GossipScorerConfig *GossipScorerConfig
}

+// PeerStatus describes if a peer is bad and provides additional details.
+type PeerStatus struct {
+	IsBad bool
+	Details map[string]interface{}
+}
+
// NewService provides fully initialized peer scoring service.
func NewService(ctx context.Context, store *peerdata.Store, config *Config) *Service {
	s := &Service{
@@ -123,27 +129,29 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
	return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}

-// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
-func (s *Service) IsBadPeer(pid peer.ID) bool {
+// Status traverses all the scorers to see if any of them classifies peer as bad.
+func (s *Service) Status(pid peer.ID) PeerStatus {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.IsBadPeerNoLock(pid)
}

// IsBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
-	if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
-		return true
+func (s *Service) IsBadPeerNoLock(pid peer.ID) PeerStatus {
+	if status := s.scorers.badResponsesScorer.statusNoLock(pid); status.IsBad {
+		return status
	}
-	if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
-		return true
+
+	if status := s.scorers.peerStatusScorer.statusNoLock(pid); status.IsBad {
+		return status
	}

	if features.Get().EnablePeerScorer {
-		if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
-			return true
+		if status := s.scorers.gossipScorer.statusNoLock(pid); status.IsBad {
+			return status
		}
	}
-	return false
+	return PeerStatus{IsBad: false}
}

// BadPeers returns the peers that are considered bad by any of registered scorers.
@@ -153,7 +161,7 @@ func (s *Service) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
-		if s.IsBadPeerNoLock(pid) {
+		if s.IsBadPeerNoLock(pid).IsBad {
			badPeers = append(badPeers, pid)
		}
	}
@@ -100,7 +100,7 @@ func TestScorers_Service_Score(t *testing.T) {
return scores
}

pack := func(scorer *scorers.Service, s1, s2, s3 float64) map[string]float64 {
pack := func(_ *scorers.Service, s1, s2, s3 float64) map[string]float64 {
return map[string]float64{
"peer1": roundScore(s1),
"peer2": roundScore(s2),
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")
assert.Equal(t, true, s1.Status(pid1).IsBad, "Peer should be marked as bad")

s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
if !s1.Status(pid1).IsBad && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
}()

<-done
assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, false, s1.Status(pid1).IsBad, "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}

@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
},
})

assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().Status("peer1").IsBad)
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, true, peerStatuses.Scorers().Status("peer1").IsBad)
}

func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
},
})

assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, false, peerStatuses.Scorers().Status("peer1").IsBad)
assert.Equal(t, false, peerStatuses.Scorers().Status("peer2").IsBad)
assert.Equal(t, false, peerStatuses.Scorers().Status("peer3").IsBad)
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, true, peerStatuses.Scorers().Status("peer1").IsBad)
assert.Equal(t, false, peerStatuses.Scorers().Status("peer2").IsBad)
assert.Equal(t, true, peerStatuses.Scorers().Status("peer3").IsBad)
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

@@ -341,21 +341,30 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
return prysmTime.Now(), peerdata.ErrPeerUnknown
}

// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// Status states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
func (p *Status) Status(pid peer.ID) scorers.PeerStatus {
p.store.RLock()
defer p.store.RUnlock()
return p.isBad(pid)
return p.status(pid)
}

// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) bool {
// Do not disconnect from trusted peers.
// status is the lock-free version of Status.
func (p *Status) status(pid peer.ID) scorers.PeerStatus {
// Trusted peers are never considered bad.
if p.store.IsTrustedPeer(pid) {
return false
return scorers.PeerStatus{IsBad: false}
}
return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)

if status := p.statusFromIP(pid); status.IsBad {
return status
}

if status := p.scorers.IsBadPeerNoLock(pid); status.IsBad {
return status
}

return scorers.PeerStatus{IsBad: false}
}

// NextValidTime gets the earliest possible time it is to contact/dial
@@ -600,7 +609,7 @@ func (p *Status) Prune() {
return
}
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
return !p.status(pid).IsBad
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
@@ -990,24 +999,42 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {

// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {
func (p *Status) statusFromIP(pid peer.ID) scorers.PeerStatus {
peerData, ok := p.store.PeerData(pid)
if !ok {
return false
return scorers.PeerStatus{IsBad: false}
}

if peerData.Address == nil {
return false
return scorers.PeerStatus{IsBad: false}
}

ip, err := manet.ToIP(peerData.Address)
if err != nil {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return true
return scorers.PeerStatus{
IsBad: true,
Details: map[string]interface{}{
"reason": "Cannot convert multiaddress to IP",
"multiaddress": peerData.Address.String(),
},
}
}
return false

if count, ok := p.ipTracker[ip.String()]; ok {
if count > CollocationLimit {
return scorers.PeerStatus{
IsBad: true,
Details: map[string]interface{}{
"reason": "Too many peers from the same IP",
"ip": ip.String(),
"count": count,
"limit": CollocationLimit,
},
}
}
}

return scorers.PeerStatus{IsBad: false}
}

func (p *Status) addIpToTracker(pid peer.ID) {

@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
require.NoError(t, err)
}

assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.Equal(t, false, p.Status(id).IsBad, "Peer marked as bad when should be good")

address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.Equal(t, false, p.Status(id).IsBad, "Peer marked as bad when should be good")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.Equal(t, false, p.Status(id).IsBad, "Peer marked as bad when should be good")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
assert.Equal(t, true, p.Status(id).IsBad, "Peer not marked as bad when it should be")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
assert.Equal(t, true, p.Status(id).IsBad, "Peer not marked as bad when it should be")
}

func TestAddMetaData(t *testing.T) {
@@ -574,7 +574,7 @@ func TestPeerIPTracker(t *testing.T) {
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
assert.Equal(t, true, p.Status(pr).IsBad, "peer with bad ip is not bad")
}

// Add in bad peers, so that our records are trimmed out
@@ -587,7 +587,7 @@ func TestPeerIPTracker(t *testing.T) {
p.Prune()

for _, pr := range badPeers {
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
assert.Equal(t, false, p.Status(pr).IsBad, "peer with good ip is regarded as bad")
}
}

@@ -10,16 +10,27 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/network/forks"
"github.com/sirupsen/logrus"
)

var _ pubsub.SubscriptionFilter = (*Service)(nil)

// It is set at this limit to handle the possibility
// of double topic subscriptions at fork boundaries.
// -> 64 Attestation Subnets * 2.
// -> 4 Sync Committee Subnets * 2.
// -> Block,Aggregate,ProposerSlashing,AttesterSlashing,Exits,SyncContribution * 2.
const pubsubSubscriptionRequestLimit = 200
// -> BeaconBlock * 2 = 2
// -> BeaconAggregateAndProof * 2 = 2
// -> VoluntaryExit * 2 = 2
// -> ProposerSlashing * 2 = 2
// -> AttesterSlashing * 2 = 2
// -> 64 Beacon Attestation * 2 = 128
// -> SyncContributionAndProof * 2 = 2
// -> 4 SyncCommitteeSubnets * 2 = 8
// -> BlsToExecutionChange * 2 = 2
// -> 128 DataColumnSidecar * 2 = 256
// -------------------------------------
// TOTAL = 406
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
@@ -95,8 +106,15 @@ func (s *Service) CanSubscribe(topic string) bool {
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
// This method returns only the topics of interest and may return an error if the subscription
// request contains too many topics.
func (s *Service) FilterIncomingSubscriptions(_ peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
if len(subs) > pubsubSubscriptionRequestLimit {
subsCount := len(subs)
log.WithFields(logrus.Fields{
"peerID": peerID,
"subscriptionCounts": subsCount,
"subscriptionLimit": pubsubSubscriptionRequestLimit,
}).Debug("Too many incoming subscriptions, filtering them")

return nil, pubsub.ErrTooManySubscriptions
}

@@ -35,22 +35,22 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
tests := []test{
|
||||
{
|
||||
name: "block topic on current fork",
|
||||
topic: fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix,
|
||||
topic: fmt.Sprintf(BeaconBlockSubnetTopicFormat, digest) + validProtocolSuffix,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "block topic on unknown fork",
|
||||
topic: fmt.Sprintf(BlockSubnetTopicFormat, [4]byte{0xFF, 0xEE, 0x56, 0x21}) + validProtocolSuffix,
|
||||
topic: fmt.Sprintf(BeaconBlockSubnetTopicFormat, [4]byte{0xFF, 0xEE, 0x56, 0x21}) + validProtocolSuffix,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "block topic missing protocol suffix",
|
||||
topic: fmt.Sprintf(BlockSubnetTopicFormat, currentFork),
|
||||
topic: fmt.Sprintf(BeaconBlockSubnetTopicFormat, currentFork),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "block topic wrong protocol suffix",
|
||||
topic: fmt.Sprintf(BlockSubnetTopicFormat, currentFork) + "/foobar",
|
||||
topic: fmt.Sprintf(BeaconBlockSubnetTopicFormat, currentFork) + "/foobar",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
@@ -75,12 +75,12 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "att subnet topic on current fork",
|
||||
topic: fmt.Sprintf(AttestationSubnetTopicFormat, digest, 55 /*subnet*/) + validProtocolSuffix,
|
||||
topic: fmt.Sprintf(BeaconAttestationSubnetTopicFormat, digest, 55 /*subnet*/) + validProtocolSuffix,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "att subnet topic on unknown fork",
|
||||
topic: fmt.Sprintf(AttestationSubnetTopicFormat, [4]byte{0xCC, 0xBB, 0xAA, 0xA1} /*fork digest*/, 54 /*subnet*/) + validProtocolSuffix,
|
||||
topic: fmt.Sprintf(BeaconAttestationSubnetTopicFormat, [4]byte{0xCC, 0xBB, 0xAA, 0xA1} /*fork digest*/, 54 /*subnet*/) + validProtocolSuffix,
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
formatting := []interface{}{digest}
|
||||
|
||||
// Special case for attestation subnets which have a second formatting placeholder.
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
|
||||
if topic == BeaconAttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
|
||||
formatting = append(formatting, 0 /* some subnet ID */)
|
||||
}
|
||||
|
||||
@@ -252,7 +252,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
return &b
|
||||
}(),
|
||||
Topicid: func() *string {
|
||||
s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
s := fmt.Sprintf(BeaconBlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
return &s
|
||||
}(),
|
||||
},
|
||||
@@ -266,7 +266,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
return &b
|
||||
}(),
|
||||
Topicid: func() *string {
|
||||
s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
s := fmt.Sprintf(BeaconBlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
return &s
|
||||
}(),
|
||||
},
|
||||
@@ -282,7 +282,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
return &b
|
||||
}(),
|
||||
Topicid: func() *string {
|
||||
s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
s := fmt.Sprintf(BeaconBlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
return &s
|
||||
}(),
|
||||
},
|
||||
@@ -292,7 +292,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
return &b
|
||||
}(),
|
||||
Topicid: func() *string {
|
||||
s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
s := fmt.Sprintf(BeaconBlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
return &s
|
||||
}(),
|
||||
},
|
||||
@@ -306,7 +306,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
return &b
|
||||
}(),
|
||||
Topicid: func() *string {
|
||||
s := fmt.Sprintf(BlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
s := fmt.Sprintf(BeaconBlockSubnetTopicFormat, digest) + validProtocolSuffix
|
||||
return &s
|
||||
}(),
|
||||
},
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func FuzzMsgID(f *testing.F) {
|
||||
validTopic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
validTopic := fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
f.Add(validTopic)
|
||||
|
||||
f.Fuzz(func(t *testing.T, topic string) {
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) {
|
||||
s.host = p0.BHost
|
||||
s.pubsub = p0.PubSub()
|
||||
|
||||
topic := fmt.Sprintf(BlockSubnetTopicFormat, fd) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
topic := fmt.Sprintf(BeaconBlockSubnetTopicFormat, fd) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
// Establish the remote peer to be subscribed to the outgoing topic.
|
||||
_, err = p1.SubscribeToTopic(topic)
|
||||
@@ -95,7 +95,7 @@ func TestExtractGossipDigest(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "short digest",
|
||||
topic: fmt.Sprintf(BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f}) + "/" + encoder.ProtocolSuffixSSZSnappy,
|
||||
topic: fmt.Sprintf(BeaconBlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f}) + "/" + encoder.ProtocolSuffixSSZSnappy,
|
||||
want: [4]byte{},
|
||||
wantErr: true,
|
||||
error: errors.New("invalid digest length wanted"),
|
||||
@@ -109,7 +109,7 @@ func TestExtractGossipDigest(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "valid topic",
|
||||
topic: fmt.Sprintf(BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy,
|
||||
topic: fmt.Sprintf(BeaconBlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy,
|
||||
want: [4]byte{0xb5, 0x30, 0x3f, 0x2a},
|
||||
wantErr: false,
|
||||
error: nil,
|
||||
@@ -128,7 +128,7 @@ func TestExtractGossipDigest(t *testing.T) {
|
||||
}
|
||||
|
||||
func BenchmarkExtractGossipDigest(b *testing.B) {
|
||||
topic := fmt.Sprintf(BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
topic := fmt.Sprintf(BeaconBlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := ExtractGossipDigest(topic)
|
||||
|
||||
@@ -202,12 +202,13 @@ func (s *Service) Start() {
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
err = s.connectToBootnodes()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not add bootnode to the exclusion list")
|
||||
|
||||
if err := s.connectToBootnodes(); err != nil {
|
||||
log.WithError(err).Error("Could not connect to boot nodes")
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
|
||||
s.dv5Listener = listener
|
||||
go s.listenForNewNodes()
|
||||
}
|
||||
@@ -393,12 +394,17 @@ func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) er
|
||||
s.pingMethodLock.Unlock()
|
||||
}
|
||||
|
||||
func (s *Service) pingPeers() {
|
||||
func (s *Service) pingPeersAndLogEnr() {
|
||||
s.pingMethodLock.RLock()
|
||||
defer s.pingMethodLock.RUnlock()
|
||||
|
||||
localENR := s.dv5Listener.Self()
|
||||
log.WithField("ENR", localENR).Info("New node record")
|
||||
|
||||
if s.pingMethod == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, pid := range s.peers.Connected() {
|
||||
go func(id peer.ID) {
|
||||
if err := s.pingMethod(s.ctx, id); err != nil {
|
||||
@@ -471,7 +477,7 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
|
||||
if info.ID == s.host.ID() {
|
||||
return nil
|
||||
}
|
||||
if s.Peers().IsBad(info.ID) {
|
||||
if s.Peers().Status(info.ID).IsBad {
|
||||
return errors.New("refused to connect to bad peer")
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
|
||||
|
||||
@@ -313,7 +313,7 @@ func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 0, len(s.joinedTopics))
|
||||
|
||||
topic := fmt.Sprintf(AttestationSubnetTopicFormat, fd, 42) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
topic := fmt.Sprintf(BeaconAttestationSubnetTopicFormat, fd, 42) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
topicHandle, err := s.JoinTopic(topic)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(s.joinedTopics))
|
||||
|
||||
@@ -2,6 +2,7 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -20,9 +21,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
mathutil "github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
@@ -53,6 +54,79 @@ const blobSubnetLockerVal = 110
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150

// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) {
switch {
case strings.Contains(topic, GossipAttestationMessage):
return s.filterPeerForAttSubnet(index), nil
case strings.Contains(topic, GossipSyncCommitteeMessage):
return s.filterPeerForSyncSubnet(index), nil
case strings.Contains(topic, GossipDataColumnSidecarMessage):
return s.filterPeerForDataColumnsSubnet(index), nil
default:
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
}
}

// searchForPeers performs a network search for peers subscribed to a particular subnet.
// It exits as soon as one of these conditions is met:
// - It looped through `batchSize` nodes.
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
// - Iterator is exhausted.
func searchForPeers(
iterator enode.Iterator,
batchSize int,
peersToFindCount int,
filter func(node *enode.Node) bool,
) []*enode.Node {
nodeFromNodeID := make(map[enode.ID]*enode.Node, batchSize)
for i := 0; i < batchSize && len(nodeFromNodeID) <= peersToFindCount && iterator.Next(); i++ {
node := iterator.Node()

// Filter out nodes that do not meet the criteria.
if !filter(node) {
continue
}

// Remove duplicates, keeping the node with higher seq.
prevNode, ok := nodeFromNodeID[node.ID()]
if ok && prevNode.Seq() > node.Seq() {
continue
}

nodeFromNodeID[node.ID()] = node
}

// Convert the map to a slice.
nodes := make([]*enode.Node, 0, len(nodeFromNodeID))
for _, node := range nodeFromNodeID {
nodes = append(nodes, node)
}

return nodes
}

// dialPeer dials a peer in a separate goroutine.
func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.Node) {
info, _, err := convertToAddrInfo(node)
if err != nil {
return
}

if info == nil {
return
}

wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}

wg.Done()
}()
}

// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
@@ -61,69 +135,96 @@ const dataColumnSubnetVal = 150
// On some edge cases, this method may hang indefinitely while peers
// are actually found. In such a case, the user should cancel the context
// and re-run the method again.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
index uint64, threshold int) (bool, error) {
func (s *Service) FindPeersWithSubnet(
ctx context.Context,
topic string,
index uint64,
threshold int,
) (bool, error) {
const batchSize = 2000

ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()

span.AddAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.

if s.dv5Listener == nil {
// return if discovery isn't set
// Return if discovery isn't set
return false, nil
}

topic += s.Encoding().ProtocolSuffix()
iterator := s.dv5Listener.RandomNodes()
defer iterator.Close()
switch {
case strings.Contains(topic, GossipAttestationMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
case strings.Contains(topic, GossipSyncCommitteeMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
case strings.Contains(topic, GossipDataColumnSidecarMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForDataColumnsSubnet(index))
default:
return false, errors.Errorf("no subnet exists for provided topic: %s", topic)

filter, err := s.nodeFilter(topic, index)
if err != nil {
return false, errors.Wrap(err, "node filter")
}

peersSummary := func(topic string, threshold int) (int, int) {
// Retrieve how many peers we have for this topic.
peerCountForTopic := len(s.pubsub.ListPeers(topic))

// Compute how many peers we are missing to reach the threshold.
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)

return peerCountForTopic, missingPeerCountForTopic
}

// Compute how many peers we are missing to reach the threshold.
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)

// Exit early if we have enough peers.
if missingPeerCountForTopic == 0 {
return true, nil
}

log.WithFields(logrus.Fields{
"topic": topic,
"currentPeerCount": peerCountForTopic,
"targetPeerCount": threshold,
}).Debug("Searching for new peers in the network - Start")

wg := new(sync.WaitGroup)
for {
currNum := len(s.pubsub.ListPeers(topic))
if currNum >= threshold {
// If we have enough peers, we can exit the loop. This is the happy path.
if missingPeerCountForTopic == 0 {
break
}

// If the context is done, we can exit the loop. This is the unhappy path.
if err := ctx.Err(); err != nil {
return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
"only %d out of %d peers were able to be found", topic, currNum, threshold)
return false, errors.Errorf(
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
topic, peerCountForTopic, threshold,
)
}
nodeCount := int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch)

// Search for new peers in the network.
nodes := searchForPeers(iterator, batchSize, missingPeerCountForTopic, filter)

// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
nodeCount = min(nodeCount, flags.Get().MaxConcurrentDials)
maxConcurrentDials = flags.Get().MaxConcurrentDials
}
nodes := enode.ReadNodes(iterator, nodeCount)
for _, node := range nodes {
info, _, err := convertToAddrInfo(node)
if err != nil {
continue

// Dial the peers in batches.
for start := 0; start < len(nodes); start += maxConcurrentDials {
stop := min(start+maxConcurrentDials, len(nodes))
for _, node := range nodes[start:stop] {
s.dialPeer(ctx, wg, node)
}

if info == nil {
continue
}

wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
wg.Done()
}()
// Wait for all dials to be completed.
wg.Wait()
}
// Wait for all dials to be completed.
wg.Wait()

_, missingPeerCountForTopic = peersSummary(topic, threshold)
}

log.WithField("topic", topic).Debug("Searching for new peers in the network - Success")
return true, nil
}

@@ -183,11 +284,17 @@ func (s *Service) filterPeerForDataColumnsSubnet(index uint64) func(node *enode.
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
func (s *Service) hasPeerWithSubnet(topic string) bool {
|
||||
func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
|
||||
// In the event peer threshold is lower, we will choose the lower
|
||||
// threshold.
|
||||
minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
|
||||
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
|
||||
minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
|
||||
topic := subnetTopic + s.Encoding().ProtocolSuffix()
|
||||
peersWithSubnet := s.pubsub.ListPeers(topic)
|
||||
peersWithSubnetCount := len(peersWithSubnet)
|
||||
|
||||
enoughPeers := peersWithSubnetCount >= minPeers
|
||||
|
||||
return enoughPeers
|
||||
}
|
||||
|
||||
// Updates the service's discv5 listener record's attestation subnet
|
||||
@@ -239,13 +346,12 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
|
||||
localNode.Set(custodySubnetCountEntry)
|
||||
|
||||
newSeqNumber := s.metaData.SequenceNumber() + 1
|
||||
cscBytes := []byte{uint8(custodySubnetCount)}
|
||||
|
||||
s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
|
||||
SeqNumber: newSeqNumber,
|
||||
Attnets: bitVAtt,
|
||||
Syncnets: bitVSync,
|
||||
CustodySubnetCount: cscBytes,
|
||||
CustodySubnetCount: custodySubnetCount,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -113,7 +113,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
service.dv5Listener.LocalNode().Set(entry)
|
||||
|
||||
// Join and subscribe to the subnet, needed by libp2p.
|
||||
topic, err := service.pubsub.Join(fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
|
||||
topic, err := service.pubsub.Join(fmt.Sprintf(BeaconAttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = topic.Subscribe()
|
||||
@@ -155,7 +155,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
exists := make([]bool, 0, 3)
|
||||
for i := 1; i <= 3; i++ {
|
||||
subnet := uint64(i)
|
||||
topic := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
|
||||
topic := fmt.Sprintf(BeaconAttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
|
||||
|
||||
exist := false
|
||||
|
||||
|
||||
@@ -290,7 +290,7 @@ func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
// AddConnectionHandler handles the connection with a newly connected peer.
|
||||
func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID) error) {
|
||||
p.BHost.Network().Notify(&network.NotifyBundle{
|
||||
ConnectedF: func(net network.Network, conn network.Conn) {
|
||||
ConnectedF: func(_ network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction)
|
||||
@@ -314,7 +314,7 @@ func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID
|
||||
// AddDisconnectionHandler --
|
||||
func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error) {
|
||||
p.BHost.Network().Notify(&network.NotifyBundle{
|
||||
DisconnectedF: func(net network.Network, conn network.Conn) {
|
||||
DisconnectedF: func(_ network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
|
||||
|
||||
@@ -35,22 +35,22 @@ const (
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
|
||||
AttestationSubnetTopicFormat = GossipProtocolAndDigest + GossipAttestationMessage + "_%d"
|
||||
// BeaconAttestationSubnetTopicFormat is the topic format for the attestation subnet.
|
||||
BeaconAttestationSubnetTopicFormat = GossipProtocolAndDigest + GossipAttestationMessage + "_%d"
|
||||
// SyncCommitteeSubnetTopicFormat is the topic format for the sync committee subnet.
|
||||
SyncCommitteeSubnetTopicFormat = GossipProtocolAndDigest + GossipSyncCommitteeMessage + "_%d"
|
||||
// BlockSubnetTopicFormat is the topic format for the block subnet.
|
||||
BlockSubnetTopicFormat = GossipProtocolAndDigest + GossipBlockMessage
|
||||
// ExitSubnetTopicFormat is the topic format for the voluntary exit subnet.
|
||||
ExitSubnetTopicFormat = GossipProtocolAndDigest + GossipExitMessage
|
||||
// BeaconBlockSubnetTopicFormat is the topic format for the block subnet.
|
||||
BeaconBlockSubnetTopicFormat = GossipProtocolAndDigest + GossipBlockMessage
|
||||
// VoluntaryExitSubnetTopicFormat is the topic format for the voluntary exit subnet.
|
||||
VoluntaryExitSubnetTopicFormat = GossipProtocolAndDigest + GossipExitMessage
|
||||
// ProposerSlashingSubnetTopicFormat is the topic format for the proposer slashing subnet.
|
||||
ProposerSlashingSubnetTopicFormat = GossipProtocolAndDigest + GossipProposerSlashingMessage
|
||||
// AttesterSlashingSubnetTopicFormat is the topic format for the attester slashing subnet.
|
||||
AttesterSlashingSubnetTopicFormat = GossipProtocolAndDigest + GossipAttesterSlashingMessage
|
||||
// AggregateAndProofSubnetTopicFormat is the topic format for the aggregate and proof subnet.
|
||||
AggregateAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipAggregateAndProofMessage
|
||||
// SyncContributionAndProofSubnetTopicFormat is the topic format for the sync aggregate and proof subnet.
|
||||
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
||||
// BeaconAggregateAndProofSubnetTopicFormat is the topic format for the aggregate and proof subnet.
|
||||
BeaconAggregateAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipAggregateAndProofMessage
|
||||
// SyncCommitteeContributionAndProofSubnetTopicFormat is the topic format for the sync aggregate and proof subnet.
|
||||
SyncCommitteeContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
||||
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
|
||||
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
|
||||
// BlobSubnetTopicFormat is the topic format for the blob subnet.
|
||||
|
||||
@@ -3,6 +3,7 @@ package validator
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -300,9 +301,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
slot := block.Block().Slot()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -314,7 +316,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
}()
|
||||
|
||||
if isPeerDASEnabled {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, slot); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
|
||||
}
|
||||
} else {
|
||||
@@ -456,27 +458,32 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, sidecars []*ethpb.DataColumnSidecar, root [fieldparams.RootLength]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
root [fieldparams.RootLength]byte,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
|
||||
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
|
||||
|
||||
for i, sd := range sidecars {
|
||||
withHeldDataColumns := make([]uint64, 0)
|
||||
|
||||
for _, sd := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
colIdx, sidecar := i, sd
|
||||
sidecar := sd
|
||||
|
||||
if sidecar.ColumnIndex < dataColumnsWithholdCount {
|
||||
withHeldDataColumns = append(withHeldDataColumns, sidecar.ColumnIndex)
|
||||
}
|
||||
|
||||
eg.Go(func() error {
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := uint64(colIdx) % params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
subnet := sidecar.ColumnIndex % params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
if colIdx < dataColumnsWithholdCount {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"subnet": subnet,
|
||||
"dataColumnIndex": colIdx,
|
||||
}).Warning("Withholding data column")
|
||||
} else {
|
||||
if sidecar.ColumnIndex >= dataColumnsWithholdCount {
|
||||
if err := vs.P2P.BroadcastDataColumn(ctx, subnet, sidecar); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
@@ -496,9 +503,21 @@ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, sidecars [
|
||||
Type: operation.DataColumnSidecarReceived,
|
||||
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn},
|
||||
})
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if len(withHeldDataColumns) > 0 {
|
||||
slices.Sort[[]uint64](withHeldDataColumns)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"indices": withHeldDataColumns,
|
||||
}).Warning("Withholding data columns")
|
||||
}
|
||||
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
|
||||
@@ -95,6 +95,7 @@ go_library(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
@@ -126,6 +127,7 @@ go_library(
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/messagehandler:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
|
||||
@@ -20,10 +20,6 @@ import (
|
||||
const broadCastMissingDataColumnsTimeIntoSlot = 3 * time.Second
|
||||
|
||||
func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColumn blocks.VerifiedRODataColumn) error {
|
||||
// Lock to prevent concurrent reconstruction.
|
||||
s.dataColumsnReconstructionLock.Lock()
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Get the block root.
|
||||
blockRoot := verifiedRODataColumn.BlockRoot()
|
||||
|
||||
@@ -42,6 +38,18 @@ func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColu
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconstruction is possible.
|
||||
// Lock to prevent concurrent reconstruction.
|
||||
if !s.dataColumsnReconstructionLock.TryLock() {
|
||||
// If the mutex is already locked, it means that another goroutine is already reconstructing the data columns.
|
||||
// In this case, no need to reconstruct again.
|
||||
// TODO: Implement the (pathological) case where we want to reconstruct data columns corresponding to different blocks at the same time.
|
||||
// This should be a rare case and we can ignore it for now, but it needs to be addressed in the future.
|
||||
return nil
|
||||
}
|
||||
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Retrieve the custodied columns.
|
||||
custodiedColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
@@ -206,7 +214,7 @@ func (s *Service) scheduleReconstructedDataColumnsBroadcast(
|
||||
"slot": slot,
|
||||
"timeIntoSlot": broadCastMissingDataColumnsTimeIntoSlot,
|
||||
"columns": missingColumnsList,
|
||||
}).Debug("Broadcasting not seen via gossip but reconstructed data columns.")
|
||||
}).Debug("Broadcasting not seen via gossip but reconstructed data columns")
|
||||
})
|
||||
|
||||
return nil
|
||||
|
||||
@@ -3,6 +3,7 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -182,14 +183,17 @@ func (d *dataColumnSampler1D) refreshPeerInfo() {
|
||||
}
|
||||
}
|
||||
|
||||
columnWithNoPeers := make([]uint64, 0)
|
||||
columnsWithoutPeers := make([]uint64, 0)
|
||||
for column, peers := range d.peerFromColumn {
|
||||
if len(peers) == 0 {
|
||||
columnWithNoPeers = append(columnWithNoPeers, column)
|
||||
columnsWithoutPeers = append(columnsWithoutPeers, column)
|
||||
}
|
||||
}
|
||||
if len(columnWithNoPeers) > 0 {
|
||||
log.WithField("columnWithNoPeers", columnWithNoPeers).Warn("Some columns have no peers responsible for custody")
|
||||
|
||||
slices.Sort[[]uint64](columnsWithoutPeers)
|
||||
|
||||
if len(columnsWithoutPeers) > 0 {
|
||||
log.WithField("columns", columnsWithoutPeers).Warn("Some columns have no peers responsible for custody")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -426,7 +430,7 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
|
||||
// Send the request to the peer.
|
||||
roDataColumns, err := SendDataColumnSidecarByRoot(ctx, d.clock, d.p2p, pid, d.ctxMap, &req)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to send data column sidecar by root")
|
||||
log.WithError(err).WithField("pid", pid).Error("Failed to send data column sidecar by root")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.Attestation{})]
|
||||
// Given that both sync message related subnets have the same message name, we have to
|
||||
// differentiate them below.
|
||||
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
|
||||
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncCommitteeContributionAndProofSubnetTopicFormat):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
case strings.Contains(topic, p2p.GossipBlobSidecarMessage):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.BlobSidecar{})]
|
||||
@@ -83,11 +83,11 @@ func (*Service) replaceForkDigest(topic string) (string, error) {
|
||||
|
||||
func extractValidDataTypeFromTopic(topic string, digest []byte, clock *startup.Clock) (ssz.Unmarshaler, error) {
|
||||
switch topic {
|
||||
case p2p.BlockSubnetTopicFormat:
|
||||
case p2p.BeaconBlockSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.BlockMap, digest, clock)
|
||||
case p2p.AttestationSubnetTopicFormat:
|
||||
case p2p.BeaconAttestationSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.AttestationMap, digest, clock)
|
||||
case p2p.AggregateAndProofSubnetTopicFormat:
|
||||
case p2p.BeaconAggregateAndProofSubnetTopicFormat:
|
||||
return extractDataTypeFromTypeMap(types.AggregateAttestationMap, digest, clock)
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
@@ -55,11 +55,13 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
|
||||
}
|
||||
|
||||
func writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream, encoder p2p.EncodingProvider) {
|
||||
log := log.WithField("topic", stream.Protocol())
|
||||
resp, err := createErrorResponse(responseCode, reason, encoder)
|
||||
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not generate a response error")
|
||||
} else if _, err := stream.Write(resp); err != nil {
|
||||
log.WithError(err).Debugf("Could not write to stream")
|
||||
log.WithError(err).Debug("Could not write to stream")
|
||||
} else {
|
||||
// If sending the error message succeeded, close to send an EOF.
|
||||
closeStream(stream, log)
|
||||
@@ -114,7 +116,7 @@ func closeStreamAndWait(stream network.Stream, log *logrus.Entry) {
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
if isValidStreamError(err) {
|
||||
log.WithError(err).Debugf("Could not reset stream with protocol %s", stream.Protocol())
|
||||
log.WithError(err).WithField("protocol", stream.Protocol()).Debug("Could not reset stream")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func NewRegularSyncFuzz(opts ...Option) *Service {
|
||||
|
||||
// FuzzValidateBeaconBlockPubSub exports private method validateBeaconBlockPubSub for fuzz testing.
|
||||
func (s *Service) FuzzValidateBeaconBlockPubSub(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
|
||||
res, err := s.validateBeaconBlockPubSub(ctx, pid, msg)
|
||||
res, err := s.validateBeaconBlockPubSubMsg(ctx, pid, msg)
|
||||
_ = err
|
||||
return res
|
||||
}
|
||||
|
||||
@@ -97,7 +97,7 @@ func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
|
||||
for i := 0; i < defaultBurstLimit; i++ {
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream))
|
||||
}
|
||||
assert.Equal(t, true, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
assert.Equal(t, true, p1.Peers().Status(p2.PeerID()).IsBad, "peer is not marked as a bad peer")
|
||||
require.NoError(t, stream.Close(), "could not close stream")
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Time to first byte timeout. The maximum time to wait for first byte of
|
||||
@@ -182,15 +183,22 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.rpc")
|
||||
defer span.End()
|
||||
remotePeerID := stream.Conn().RemotePeer()
|
||||
|
||||
span.AddAttributes(trace.StringAttribute("topic", topic))
|
||||
span.AddAttributes(trace.StringAttribute("peer", stream.Conn().RemotePeer().String()))
|
||||
log := log.WithField("peer", stream.Conn().RemotePeer().String()).WithField("topic", string(stream.Protocol()))
|
||||
span.AddAttributes(trace.StringAttribute("peer", remotePeerID.String()))
|
||||
log := log.WithField("peer", remotePeerID).WithField("topic", string(stream.Protocol()))
|
||||
|
||||
// Check beforehand that the peer is valid.
|
||||
if s.cfg.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
|
||||
if status := s.cfg.p2p.Peers().Status(remotePeerID); status.IsBad {
|
||||
log = log.WithFields(logrus.Fields{"bad": true, "ban": true}).WithFields(status.Details)
|
||||
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, remotePeerID); err != nil {
|
||||
log.WithError(err).Debug("Could not disconnect from peer")
|
||||
}
|
||||
|
||||
log.Debug("Peer disconnected")
|
||||
|
||||
return
|
||||
}
|
||||
// Validate request according to peer limits.
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// beaconBlocksByRangeRPCHandler looks up the request blocks from the database from a given start block.
|
||||
@@ -26,21 +27,31 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
defer cancel()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
|
||||
m, ok := msg.(*pb.BeaconBlocksByRangeRequest)
|
||||
if !ok {
|
||||
return errors.New("message is not type *pb.BeaconBlockByRangeRequest")
|
||||
}
|
||||
log.WithField("startSlot", m.StartSlot).WithField("count", m.Count).Debug("Serving block by range request")
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"startSlot": m.StartSlot,
|
||||
"count": m.Count,
|
||||
"peer": remotePeer,
|
||||
"topic": stream.Protocol(),
|
||||
})
|
||||
|
||||
rp, err := validateRangeRequest(m, s.cfg.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Debug("Serving block by range request - Validate range request")
|
||||
return err
|
||||
}
|
||||
available := s.validateRangeAvailability(rp)
|
||||
if !available {
|
||||
log.Debug("error in validating range availability")
|
||||
log.Debug("Serving block by range request - Not available")
|
||||
s.writeErrorResponseToStream(responseCodeResourceUnavailable, p2ptypes.ErrResourceUnavailable.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil
|
||||
@@ -48,14 +59,17 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
|
||||
blockLimiter, err := s.rateLimiter.topicCollector(string(stream.Protocol()))
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Serving block by range request - Topic collector")
|
||||
return err
|
||||
}
|
||||
remainingBucketCapacity := blockLimiter.Remaining(stream.Conn().RemotePeer().String())
|
||||
remainingBucketCapacity := blockLimiter.Remaining(remotePeer.String())
|
||||
log = log.WithField("remainingBucketCapacity", remainingBucketCapacity)
|
||||
|
||||
span.AddAttributes(
|
||||
trace.Int64Attribute("start", int64(rp.start)), // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
trace.Int64Attribute("end", int64(rp.end)), // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
trace.Int64Attribute("count", int64(m.Count)),
|
||||
trace.StringAttribute("peer", stream.Conn().RemotePeer().String()),
|
||||
trace.StringAttribute("peer", remotePeer.String()),
|
||||
trace.Int64Attribute("remaining_capacity", remainingBucketCapacity),
|
||||
)
|
||||
|
||||
@@ -64,7 +78,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
defer ticker.Stop()
|
||||
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("error in BlocksByRange batch")
|
||||
log.WithError(err).Debug("Serving block by range request - newBlockRangeBatcher")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -77,17 +91,22 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
for batch, more = batcher.next(ctx, stream); more; batch, more = batcher.next(ctx, stream) {
|
||||
batchStart := time.Now()
|
||||
if err := s.writeBlockBatchToStream(ctx, batch, stream); err != nil {
|
||||
log.WithError(err).Debug("Serving block by range request - writeBlockBatchToStream")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
return err
|
||||
}
|
||||
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
|
||||
}
|
||||
|
||||
log.Debug("Serving block by range request")
|
||||
|
||||
if err := batch.error(); err != nil {
|
||||
log.WithError(err).Debug("error in BlocksByRange batch")
|
||||
log.WithError(err).Debug("Serving block by range request - BlocksByRange batch")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
closeStream(stream, log)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
@@ -204,7 +205,7 @@ func (s *Service) sendAndSaveDataColumnSidecars(ctx context.Context, request typ
|
||||
if err := verify.ColumnAlignsWithBlock(sidecar, RoBlock, s.newColumnVerifier); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(columnFields(sidecar)).Debug("Received data column sidecar RPC")
|
||||
log.WithFields(logging.DataColumnFields(sidecar)).Debug("Received data column sidecar RPC")
|
||||
}
|
||||
|
||||
for i := range sidecars {
|
||||
|
||||
@@ -89,12 +89,17 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
|
||||
return custodiedColumnsList[i] < custodiedColumnsList[j]
|
||||
})
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"custodied": custodiedColumnsList,
|
||||
"requested": requestedColumnsList,
|
||||
"custodiedCount": len(custodiedColumnsList),
|
||||
"requestedCount": len(requestedColumnsList),
|
||||
}).Debug("Data column sidecar by root request received")
|
||||
fields := logrus.Fields{
|
||||
"requested": requestedColumnsList,
|
||||
}
|
||||
|
||||
if uint64(len(custodiedColumnsList)) == params.BeaconConfig().NumberOfColumns {
|
||||
fields["custodied"] = "all"
|
||||
} else {
|
||||
fields["custodied"] = custodiedColumnsList
|
||||
}
|
||||
|
||||
log.WithFields(fields).Debug("Data column sidecar by root request received")
|
||||
|
||||
// Subscribe to the data column feed.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/prysm/v5/async"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -55,10 +56,7 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
|
||||
|
||||
// disconnectBadPeer checks whether the peer is considered bad by some scorer and, if so, tries to
// disconnect the peer. The disconnection reason is also obtained from the scorer.
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if !s.cfg.p2p.Peers().IsBad(id) {
|
||||
return
|
||||
}
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID, peerStatus scorers.PeerStatus) {
|
||||
err := s.cfg.p2p.Peers().Scorers().ValidationError(id)
|
||||
goodbyeCode := p2ptypes.ErrToGoodbyeCode(err)
|
||||
if err == nil {
|
||||
@@ -67,6 +65,13 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
|
||||
log.WithError(err).Debug("Error when disconnecting with bad peer")
|
||||
}
|
||||
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"peerID": id,
|
||||
"bad": true,
|
||||
})
|
||||
|
||||
log.WithFields(peerStatus.Details).Debug("Peer disconnected")
|
||||
}
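For context on the new signature, here is a reduced sketch of a scorer-driven disconnect. The PeerStatus struct, goodbye-code mapping, and logging below are simplified assumptions, not Prysm's actual types:

package main

import (
	"errors"
	"fmt"
)

// PeerStatus is an assumed stand-in for the scorer status passed to disconnectBadPeer.
type PeerStatus struct {
	IsBad   bool
	Details map[string]interface{}
}

// goodbyeCodeFor maps a validation error to a goodbye code (illustrative only).
func goodbyeCodeFor(err error) int {
	if err == nil {
		return 1 // generic "shutdown"-style code
	}
	return 3 // generic "fault/error" code
}

// disconnectBadPeer mirrors the shape of the handler above: derive a goodbye
// code from the peer's validation error, send goodbye, then log the details
// that the scorer attached to the status.
func disconnectBadPeer(peerID string, validationErr error, status PeerStatus, sendGoodbye func(string, int) error) {
	code := goodbyeCodeFor(validationErr)
	if err := sendGoodbye(peerID, code); err != nil {
		fmt.Println("error when disconnecting with bad peer:", err)
	}
	fmt.Printf("peer %s disconnected (bad=true, details=%v)\n", peerID, status.Details)
}

func main() {
	status := PeerStatus{IsBad: true, Details: map[string]interface{}{"reason": "bad responses"}}
	disconnectBadPeer("16Uiu2...", errors.New("invalid status"), status,
		func(id string, code int) error { return nil })
}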
// A custom goodbye method that is used by our connection handler, in the
|
||||
|
||||
@@ -19,6 +19,8 @@ import (
|
||||
|
||||
// metaDataHandler reads the incoming metadata rpc request from the peer.
|
||||
func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
|
||||
log := log.WithField("topic", stream.Protocol())
|
||||
|
||||
SetRPCStreamDeadlines(stream)
|
||||
|
||||
// Validate the incoming request regarding rate limiting.
|
||||
@@ -104,7 +106,7 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2
|
||||
Attnets: metadata.AttnetsBitfield(),
|
||||
SeqNumber: metadata.SequenceNumber(),
|
||||
Syncnets: bitfield.Bitvector4{byte(0x00)},
|
||||
CustodySubnetCount: []byte{0},
|
||||
CustodySubnetCount: 0,
|
||||
})
|
||||
case version.Altair:
|
||||
metadata = wrapper.WrappedMetadataV2(
|
||||
@@ -112,7 +114,7 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2
|
||||
Attnets: metadata.AttnetsBitfield(),
|
||||
SeqNumber: metadata.SequenceNumber(),
|
||||
Syncnets: metadata.SyncnetsBitfield(),
|
||||
CustodySubnetCount: []byte{0},
|
||||
CustodySubnetCount: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,7 +153,7 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) {
|
||||
SeqNumber: seqNumber,
|
||||
Attnets: attnets,
|
||||
Syncnets: syncnets,
|
||||
CustodySubnetCount: []byte{custodySubnetCount},
|
||||
CustodySubnetCount: custodySubnetCount,
|
||||
}),
|
||||
expected: wrapper.WrappedMetadataV0(&pb.MetaDataV0{
|
||||
SeqNumber: seqNumber,
|
||||
@@ -200,7 +200,7 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) {
|
||||
SeqNumber: seqNumber,
|
||||
Attnets: attnets,
|
||||
Syncnets: syncnets,
|
||||
CustodySubnetCount: []byte{custodySubnetCount},
|
||||
CustodySubnetCount: custodySubnetCount,
|
||||
}),
|
||||
expected: wrapper.WrappedMetadataV1(&pb.MetaDataV1{
|
||||
SeqNumber: seqNumber,
|
||||
@@ -221,7 +221,7 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) {
|
||||
SeqNumber: seqNumber,
|
||||
Attnets: attnets,
|
||||
Syncnets: bitfield.Bitvector4{byte(0x00)},
|
||||
CustodySubnetCount: []byte{0},
|
||||
CustodySubnetCount: 0,
|
||||
}),
|
||||
},
|
||||
{
|
||||
@@ -238,7 +238,7 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) {
|
||||
SeqNumber: seqNumber,
|
||||
Attnets: attnets,
|
||||
Syncnets: syncnets,
|
||||
CustodySubnetCount: []byte{0},
|
||||
CustodySubnetCount: 0,
|
||||
}),
|
||||
},
|
||||
{
|
||||
@@ -250,13 +250,13 @@ func TestMetadataRPCHandler_SendMetadataRequest(t *testing.T) {
|
||||
SeqNumber: seqNumber,
|
||||
Attnets: attnets,
|
||||
Syncnets: syncnets,
|
||||
CustodySubnetCount: []byte{custodySubnetCount},
|
||||
CustodySubnetCount: custodySubnetCount,
|
||||
}),
|
||||
expected: wrapper.WrappedMetadataV2(&pb.MetaDataV2{
|
||||
SeqNumber: seqNumber,
|
||||
Attnets: attnets,
|
||||
Syncnets: syncnets,
|
||||
CustodySubnetCount: []byte{custodySubnetCount},
|
||||
CustodySubnetCount: custodySubnetCount,
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -142,7 +142,7 @@ func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
|
||||
// If the peer responded with an error, increment the bad responses scorer.
|
||||
if code != 0 {
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
|
||||
return errors.New(errMsg)
|
||||
return errors.Errorf("code: %d - %s", code, errMsg)
|
||||
}
|
||||
|
||||
// Decode the sequence number from the peer.
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// maintainPeerStatuses by infrequently polling peers for their latest status.
// maintainPeerStatuses maintains peer statuses by polling peers for their latest status twice per epoch.
func (s *Service) maintainPeerStatuses() {
|
||||
// Run twice per epoch.
|
||||
interval := time.Duration(params.BeaconConfig().SlotsPerEpoch.Div(2).Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
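A quick sanity check on the cadence described above: with the usual mainnet parameters assumed here (32 slots per epoch, 12 seconds per slot), polling twice per epoch works out to one round every 3m12s. A minimal sketch of the same computation:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed mainnet values; the real code reads them from params.BeaconConfig().
	const slotsPerEpoch = 32
	const secondsPerSlot = 12

	// Run twice per epoch, mirroring the interval computed in maintainPeerStatuses.
	interval := time.Duration(slotsPerEpoch/2*secondsPerSlot) * time.Second
	fmt.Println(interval) // 3m12s
}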
@@ -43,11 +43,15 @@ func (s *Service) maintainPeerStatuses() {
|
||||
log.WithError(err).Debug("Error when disconnecting with peer")
|
||||
}
|
||||
s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": id,
|
||||
"reason": "maintain peer statuses - peer is not connected",
|
||||
}).Debug("Peer disconnected")
|
||||
return
|
||||
}
|
||||
// Disconnect from peers that are considered bad by any of the registered scorers.
|
||||
if s.cfg.p2p.Peers().IsBad(id) {
|
||||
s.disconnectBadPeer(s.ctx, id)
|
||||
if status := s.cfg.p2p.Peers().Status(id); status.IsBad {
|
||||
s.disconnectBadPeer(s.ctx, id, status)
|
||||
return
|
||||
}
|
||||
// If the status hasn't been updated in the recent interval time.
|
||||
@@ -73,6 +77,11 @@ func (s *Service) maintainPeerStatuses() {
|
||||
if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeTooManyPeers, id); err != nil {
|
||||
log.WithField("peer", id).WithError(err).Debug("Could not disconnect with peer")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": id,
|
||||
"reason": "to be pruned",
|
||||
}).Debug("Peer disconnected")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -169,8 +178,8 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
|
||||
// If validation fails, the validation error is logged and the peer status scorer will mark the peer as bad.
|
||||
err = s.validateStatusMessage(ctx, msg)
|
||||
s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
|
||||
if s.cfg.p2p.Peers().IsBad(id) {
|
||||
s.disconnectBadPeer(s.ctx, id)
|
||||
if status := s.cfg.p2p.Peers().Status(id); status.IsBad {
|
||||
s.disconnectBadPeer(s.ctx, id, status)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -193,7 +202,12 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
|
||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
defer cancel()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
log := log.WithField("handler", "status")
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"handler": "statusRPCHandler",
|
||||
"topic": stream.Protocol(),
|
||||
})
|
||||
|
||||
m, ok := msg.(*pb.Status)
|
||||
if !ok {
|
||||
return errors.New("message is not type *pb.Status")
|
||||
|
||||
@@ -877,7 +877,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
|
||||
require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))
|
||||
|
||||
assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
|
||||
assert.Equal(t, false, p1.Peers().Scorers().Status(p2.PeerID()).IsBad, "Peer is marked as bad")
|
||||
p1.Connect(p2)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
@@ -889,7 +889,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
require.NoError(t, err, "Could not obtain peer connection state")
|
||||
assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")
|
||||
|
||||
assert.Equal(t, true, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
|
||||
assert.Equal(t, true, p1.Peers().Scorers().Status(p2.PeerID()).IsBad, "Peer is not marked as bad")
|
||||
}
|
||||
|
||||
func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
@@ -53,117 +54,173 @@ func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Messag
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Register PubSub subscribers
|
||||
func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
// registerSubscribersFromGenesis registers subscribers for subnets needed from genesis.
|
||||
// It includes: beacon block, beacon aggregate and proof, voluntary exit, proposer slashing, attester slashing and beacon attestation.
|
||||
func (s *Service) registerSubscribersFromGenesis(digest [4]byte) {
|
||||
// Beacon block
|
||||
s.subscribe(
|
||||
p2p.BlockSubnetTopicFormat,
|
||||
s.validateBeaconBlockPubSub,
|
||||
p2p.BeaconBlockSubnetTopicFormat,
|
||||
s.validateBeaconBlockPubSubMsg,
|
||||
s.beaconBlockSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
// Beacon aggregate and proof
|
||||
s.subscribe(
|
||||
p2p.AggregateAndProofSubnetTopicFormat,
|
||||
s.validateAggregateAndProof,
|
||||
s.beaconAggregateProofSubscriber,
|
||||
p2p.BeaconAggregateAndProofSubnetTopicFormat,
|
||||
s.validateBeaconAggregateAndProofPubSubMsg,
|
||||
s.beaconAggregateAndProofSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
// Voluntary exit
|
||||
s.subscribe(
|
||||
p2p.ExitSubnetTopicFormat,
|
||||
s.validateVoluntaryExit,
|
||||
p2p.VoluntaryExitSubnetTopicFormat,
|
||||
s.validateVoluntaryExitPubSubMsg,
|
||||
s.voluntaryExitSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
// Proposer slashing
|
||||
s.subscribe(
|
||||
p2p.ProposerSlashingSubnetTopicFormat,
|
||||
s.validateProposerSlashing,
|
||||
s.validateProposerSlashingPubSubMsg,
|
||||
s.proposerSlashingSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
// Attester slashing
|
||||
s.subscribe(
|
||||
p2p.AttesterSlashingSubnetTopicFormat,
|
||||
s.validateAttesterSlashing,
|
||||
s.validateAttesterSlashingPubSubMsg,
|
||||
s.attesterSlashingSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
// Beacon attestation -- subscribe to all subnets.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.AttestationSubnetTopicFormat,
|
||||
s.validateCommitteeIndexBeaconAttestation, /* validator */
|
||||
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
|
||||
p2p.BeaconAttestationSubnetTopicFormat,
|
||||
s.validateBeaconAttestationPubSubMsg,
|
||||
s.beaconAttestationSubscriber,
|
||||
digest,
|
||||
params.BeaconConfig().AttestationSubnetCount,
|
||||
)
|
||||
} else {
|
||||
s.subscribeDynamicWithSubnets(
|
||||
p2p.AttestationSubnetTopicFormat,
|
||||
s.validateCommitteeIndexBeaconAttestation, /* validator */
|
||||
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Beacon attestation -- subscribe to required subnets.
|
||||
s.subscribeDynamicWithSubnets(
|
||||
p2p.BeaconAttestationSubnetTopicFormat,
|
||||
s.validateBeaconAttestationPubSubMsg,
|
||||
s.beaconAttestationSubscriber,
|
||||
digest,
|
||||
)
|
||||
}
|
||||
|
||||
// registerSubscribersFromAltair registers subscribers for subnets needed from the Altair hard fork.
// It includes: sync committee contribution and proof, and sync committee messages.
|
||||
func (s *Service) registerSubscribersFromAltair(digest [4]byte) {
|
||||
// Sync committee contribution and proof
|
||||
s.subscribe(
|
||||
p2p.SyncCommitteeContributionAndProofSubnetTopicFormat,
|
||||
s.validateSyncCommitteeContributionAndProofPubSubMsg,
|
||||
s.syncCommitteeContributionAndProofSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
// Sync committee -- subscribe to all subnets.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
s.subscribeStaticWithSyncSubnets(
|
||||
p2p.SyncCommitteeSubnetTopicFormat,
|
||||
s.validateSyncCommitteeMessage,
|
||||
s.syncCommitteeMessageSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
// Altair Fork Version
|
||||
|
||||
// Sync committee -- subscribe to required subnets.
|
||||
s.subscribeDynamicWithSyncSubnets(
|
||||
p2p.SyncCommitteeSubnetTopicFormat,
|
||||
s.validateSyncCommitteeMessage,
|
||||
s.syncCommitteeMessageSubscriber,
|
||||
digest,
|
||||
)
|
||||
}
|
||||
|
||||
// registerSubscribersFromCapella registers subscribers for subnets needed from the Capella hard fork.
// It includes: BLS to execution change.
|
||||
func (s *Service) registerSubscribersFromCapella(digest [4]byte) {
|
||||
// BLS to execution change
|
||||
s.subscribe(
|
||||
p2p.BlsToExecutionChangeSubnetTopicFormat,
|
||||
s.validateBlsToExecutionChange,
|
||||
s.blsToExecutionChangeSubscriber,
|
||||
digest,
|
||||
)
|
||||
}
|
||||
|
||||
// registerSubscribersFromDeneb registers subscribers for subnets needed from the Deneb hard fork.
// It includes: blob sidecar and data column sidecar (depending on the peerDAS status).
|
||||
func (s *Service) registerSubscribersFromDeneb(epoch primitives.Epoch, digest [4]byte) {
|
||||
peerDasIsActive := coreTime.PeerDASIsActive(slots.UnsafeEpochStart(epoch))
|
||||
|
||||
if !peerDasIsActive {
|
||||
// PeerDAS is not yet active, blob sidecar
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.BlobSubnetTopicFormat,
|
||||
s.validateBlob,
|
||||
s.blobSubscriber,
|
||||
digest,
|
||||
params.BeaconConfig().BlobsidecarSubnetCount,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PeerDAS is active, data columns sidecar -- subscribe to all subnets.
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.DataColumnSubnetTopicFormat,
|
||||
s.validateDataColumn,
|
||||
s.dataColumnSubscriber,
|
||||
digest,
|
||||
params.BeaconConfig().DataColumnSidecarSubnetCount,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PeerDAS is active, data columns sidecar -- subscribe to required subnets.
|
||||
s.subscribeDynamicWithColumnSubnets(
|
||||
p2p.DataColumnSubnetTopicFormat,
|
||||
s.validateDataColumn,
|
||||
s.dataColumnSubscriber,
|
||||
digest,
|
||||
)
|
||||
}
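The Deneb registration above picks one of three subscription modes. A compact sketch of that decision; the mode strings and parameter names are placeholders for the real flags and subnet counts:

package main

import "fmt"

// chooseDenebSubscription mirrors the branch in registerSubscribersFromDeneb:
// blob subnets before peerDAS is active, and data column subnets (static for
// all subnets, or dynamic for the custodied subset) once it is.
func chooseDenebSubscription(peerDASActive, subscribeToAllSubnets bool) string {
	if !peerDASActive {
		return "blob sidecar subnets (static)"
	}
	if subscribeToAllSubnets {
		return "data column subnets (static, all subnets)"
	}
	return "data column subnets (dynamic, custodied subset)"
}

func main() {
	fmt.Println(chooseDenebSubscription(false, false))
	fmt.Println(chooseDenebSubscription(true, true))
	fmt.Println(chooseDenebSubscription(true, false))
}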
// registerSubscribers registers subscribers
|
||||
func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
// From genesis
|
||||
s.registerSubscribersFromGenesis(digest)
|
||||
|
||||
// From Altair hard fork
|
||||
if epoch >= params.BeaconConfig().AltairForkEpoch {
|
||||
s.subscribe(
|
||||
p2p.SyncContributionAndProofSubnetTopicFormat,
|
||||
s.validateSyncContributionAndProof,
|
||||
s.syncContributionAndProofSubscriber,
|
||||
digest,
|
||||
)
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
s.subscribeStaticWithSyncSubnets(
|
||||
p2p.SyncCommitteeSubnetTopicFormat,
|
||||
s.validateSyncCommitteeMessage, /* validator */
|
||||
s.syncCommitteeMessageSubscriber, /* message handler */
|
||||
digest,
|
||||
)
|
||||
} else {
|
||||
s.subscribeDynamicWithSyncSubnets(
|
||||
p2p.SyncCommitteeSubnetTopicFormat,
|
||||
s.validateSyncCommitteeMessage, /* validator */
|
||||
s.syncCommitteeMessageSubscriber, /* message handler */
|
||||
digest,
|
||||
)
|
||||
}
|
||||
s.registerSubscribersFromAltair(digest)
|
||||
}
|
||||
|
||||
// New Gossip Topic in Capella
|
||||
// From Capella hard fork
|
||||
if epoch >= params.BeaconConfig().CapellaForkEpoch {
|
||||
s.subscribe(
|
||||
p2p.BlsToExecutionChangeSubnetTopicFormat,
|
||||
s.validateBlsToExecutionChange,
|
||||
s.blsToExecutionChangeSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.registerSubscribersFromCapella(digest)
|
||||
}
|
||||
|
||||
// New Gossip Topic in Deneb
|
||||
// From Deneb hard fork
|
||||
if epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
if coreTime.PeerDASIsActive(slots.UnsafeEpochStart(epoch)) {
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.DataColumnSubnetTopicFormat,
|
||||
s.validateDataColumn, /* validator */
|
||||
s.dataColumnSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconConfig().DataColumnSidecarSubnetCount,
|
||||
)
|
||||
} else {
|
||||
s.subscribeDynamicWithColumnSubnets(
|
||||
p2p.DataColumnSubnetTopicFormat,
|
||||
s.validateDataColumn, /* validator */
|
||||
s.dataColumnSubscriber, /* message handler */
|
||||
digest,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.BlobSubnetTopicFormat,
|
||||
s.validateBlob, /* validator */
|
||||
s.blobSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconConfig().BlobsidecarSubnetCount,
|
||||
)
|
||||
}
|
||||
s.registerSubscribersFromDeneb(epoch, digest)
|
||||
}
|
||||
}
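The refactor reduces registerSubscribers to a fork-gated dispatcher. A minimal standalone sketch of that shape; the fork epochs and handler bodies below are illustrative assumptions, not Prysm's configuration:

package main

import "fmt"

// Epoch is a simplified stand-in for primitives.Epoch.
type Epoch uint64

// Assumed fork epochs for illustration only.
const (
	altairForkEpoch  Epoch = 74240
	capellaForkEpoch Epoch = 194048
	denebForkEpoch   Epoch = 269568
)

func registerFromGenesis(digest [4]byte) { fmt.Println("genesis topics", digest) }
func registerFromAltair(digest [4]byte)  { fmt.Println("altair topics", digest) }
func registerFromCapella(digest [4]byte) { fmt.Println("capella topics", digest) }
func registerFromDeneb(epoch Epoch, digest [4]byte) {
	fmt.Println("deneb topics at epoch", epoch, digest)
}

// registerSubscribers mirrors the dispatcher shape: always register the genesis
// topics, then add each fork's topics once the current epoch has reached it.
func registerSubscribers(epoch Epoch, digest [4]byte) {
	registerFromGenesis(digest)
	if epoch >= altairForkEpoch {
		registerFromAltair(digest)
	}
	if epoch >= capellaForkEpoch {
		registerFromCapella(digest)
	}
	if epoch >= denebForkEpoch {
		registerFromDeneb(epoch, digest)
	}
}

func main() {
	registerSubscribers(300000, [4]byte{0xde, 0xad, 0xbe, 0xef})
}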
@@ -191,7 +248,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
|
||||
// Do not resubscribe already seen subscriptions.
|
||||
ok := s.subHandler.topicExists(topic)
|
||||
if ok {
|
||||
log.Debugf("Provided topic already has an active subscription running: %s", topic)
|
||||
log.WithField("topic", topic).Debug("Provided topic already has an active subscription running")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -208,6 +265,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
|
||||
log.WithError(err).Error("Could not subscribe topic")
|
||||
return nil
|
||||
}
|
||||
|
||||
s.subHandler.addTopic(sub.Topic(), sub)
|
||||
|
||||
// Pipeline decodes the incoming subscription data, runs the validation, and handles the
|
||||
@@ -215,6 +273,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
|
||||
pipeline := func(msg *pubsub.Message) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
|
||||
defer cancel()
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.pubsub")
|
||||
defer span.End()
|
||||
|
||||
@@ -332,8 +391,8 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
|
||||
"multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
|
||||
"peerID": pid.String(),
|
||||
"agent": agentString(pid, s.cfg.p2p.Host()),
|
||||
"gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
|
||||
}).Debugf("Gossip message was ignored")
|
||||
"gossipScore": fmt.Sprintf("%.2f", s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)),
|
||||
}).Debug("Gossip message was ignored")
|
||||
}
|
||||
messageIgnoredValidationCounter.WithLabelValues(topic).Inc()
|
||||
}
|
||||
@@ -388,9 +447,7 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal,
|
||||
}
|
||||
// Check every slot that there are enough peers
|
||||
for i := uint64(0); i < subnetCount; i++ {
|
||||
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", i)
|
||||
if !s.enoughPeersAreConnected(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(
|
||||
s.ctx,
|
||||
s.addDigestAndIndexToTopic(topic, digest, i),
|
||||
@@ -454,10 +511,8 @@ func (s *Service) subscribeDynamicWithSubnets(
|
||||
return
|
||||
}
|
||||
wantedSubs := s.retrievePersistentSubs(currentSlot)
|
||||
// Resize as appropriate.
|
||||
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)
|
||||
|
||||
// subscribe desired aggregator subnets.
|
||||
for _, idx := range wantedSubs {
|
||||
s.subscribeAggregatorSubnet(subscriptions, idx, digest, validate, handle)
|
||||
}
|
||||
@@ -471,9 +526,15 @@ func (s *Service) subscribeDynamicWithSubnets(
|
||||
}()
|
||||
}
|
||||
|
||||
// revalidate that our currently connected subnets are valid.
|
||||
func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subscription,
|
||||
wantedSubs []uint64, topicFormat string, digest [4]byte) {
|
||||
// reValidateSubscriptions unsubscribes from topics we are currently subscribed to but that are
// not in the list of wanted subnets.
// TODO: Rename this function, as it does more than just revalidate subscriptions.
|
||||
func (s *Service) reValidateSubscriptions(
|
||||
subscriptions map[uint64]*pubsub.Subscription,
|
||||
wantedSubs []uint64,
|
||||
topicFormat string,
|
||||
digest [4]byte,
|
||||
) {
|
||||
for k, v := range subscriptions {
|
||||
var wanted bool
|
||||
for _, idx := range wantedSubs {
|
||||
@@ -482,6 +543,7 @@ func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subsc
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !wanted && v != nil {
|
||||
v.Cancel()
|
||||
fullTopic := fmt.Sprintf(topicFormat, digest, k) + s.cfg.p2p.Encoding().ProtocolSuffix()
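The pruning loop above boils down to: cancel and forget every subscription whose subnet index is not in the wanted list. A small reconciliation sketch, using a plain cancel func in place of *pubsub.Subscription:

package main

import "fmt"

// pruneSubscriptions cancels and removes every subscription whose subnet index
// is not listed in wanted, mirroring reValidateSubscriptions above.
func pruneSubscriptions(subscriptions map[uint64]func(), wanted []uint64) {
	wantedSet := make(map[uint64]bool, len(wanted))
	for _, idx := range wanted {
		wantedSet[idx] = true
	}
	for idx, cancel := range subscriptions {
		if wantedSet[idx] {
			continue
		}
		if cancel != nil {
			cancel()
		}
		delete(subscriptions, idx)
	}
}

func main() {
	subs := map[uint64]func(){
		1: func() { fmt.Println("cancelled subnet 1") },
		2: func() { fmt.Println("cancelled subnet 2") },
	}
	pruneSubscriptions(subs, []uint64{2})
	fmt.Println("remaining:", len(subs)) // remaining: 1
}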
@@ -507,35 +569,7 @@ func (s *Service) subscribeAggregatorSubnet(
|
||||
if _, exists := subscriptions[idx]; !exists {
|
||||
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
|
||||
}
|
||||
if !s.validPeersExist(subnetTopic) {
|
||||
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// subscribe missing subnets for our sync committee members.
|
||||
func (s *Service) subscribeSyncSubnet(
|
||||
subscriptions map[uint64]*pubsub.Subscription,
|
||||
idx uint64,
|
||||
digest [4]byte,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
) {
|
||||
// do not subscribe if we have no peers in the same
|
||||
// subnet
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
subnetTopic := fmt.Sprintf(topic, digest, idx)
|
||||
// check if subscription exists and if not subscribe the relevant subnet.
|
||||
if _, exists := subscriptions[idx]; !exists {
|
||||
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
|
||||
}
|
||||
if !s.validPeersExist(subnetTopic) {
|
||||
log.Debugf("No peers found subscribed to sync gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
|
||||
if !s.enoughPeersAreConnected(subnetTopic) {
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
@@ -588,9 +622,7 @@ func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrapped
|
||||
}
|
||||
// Check every slot that there are enough peers
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
|
||||
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||
log.Debugf("No peers found subscribed to sync gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", i)
|
||||
if !s.enoughPeersAreConnected(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(
|
||||
s.ctx,
|
||||
s.addDigestAndIndexToTopic(topic, digest, i),
|
||||
@@ -608,59 +640,138 @@ func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrapped
|
||||
}()
|
||||
}
|
||||
|
||||
// subscribe to a dynamically changing list of subnets. This method expects a fmt compatible
|
||||
// string for the topic name and the list of subnets for subscribed topics that should be
|
||||
// maintained.
|
||||
// subscribeToSyncSubnets subscribes to needed sync subnets, unsubscribes from unneeded ones and searches for more peers if needed.
// Returns `true` if the digest is valid (wrt. the current epoch), `false` otherwise.
|
||||
func (s *Service) subscribeToSyncSubnets(
|
||||
topicFormat string,
|
||||
digest [4]byte,
|
||||
genesisValidatorsRoot [fieldparams.RootLength]byte,
|
||||
genesisTime time.Time,
|
||||
subscriptions map[uint64]*pubsub.Subscription,
|
||||
currentSlot primitives.Slot,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
) bool {
|
||||
// Get sync subnets topic.
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
|
||||
// Do not subscribe if not synced.
|
||||
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
|
||||
return true
|
||||
}
|
||||
|
||||
// Do not subscribe if the digest is not valid.
|
||||
valid, err := isDigestValid(digest, genesisTime, genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return true
|
||||
}
|
||||
|
||||
// Unsubscribe from all subnets if the digest is not valid. It's likely to be the case after a hard fork.
|
||||
if !valid {
|
||||
log.WithField("digest", fmt.Sprintf("%#x", digest)).Warn("Sync subnets with this digest are no longer valid, unsubscribing from all of them.")
|
||||
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
|
||||
return false
|
||||
}
|
||||
|
||||
// Get the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Retrieve the subnets we want to subscribe to.
|
||||
wantedSubnetsIndex := s.retrieveActiveSyncSubnets(currentEpoch)
|
||||
|
||||
// Remove subscriptions that are no longer wanted.
|
||||
s.reValidateSubscriptions(subscriptions, wantedSubnetsIndex, topicFormat, digest)
|
||||
|
||||
// Subscribe to wanted subnets.
|
||||
for _, subnetIndex := range wantedSubnetsIndex {
|
||||
subnetTopic := fmt.Sprintf(topic, digest, subnetIndex)
|
||||
|
||||
// Check if subscription exists.
|
||||
if _, exists := subscriptions[subnetIndex]; exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// We need to subscribe to the subnet.
|
||||
subscription := s.subscribeWithBase(subnetTopic, validate, handle)
|
||||
subscriptions[subnetIndex] = subscription
|
||||
}
|
||||
|
||||
// Find new peers for wanted subnets if needed.
|
||||
for _, subnetIndex := range wantedSubnetsIndex {
|
||||
subnetTopic := fmt.Sprintf(topic, digest, subnetIndex)
|
||||
|
||||
// Check if we have enough peers in the subnet. Skip if we do.
|
||||
if s.enoughPeersAreConnected(subnetTopic) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Not enough peers in the subnet, we need to search for more.
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, subnetIndex, flags.Get().MinimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// subscribeDynamicWithSyncSubnets subscribes to a dynamically changing list of subnets.
|
||||
func (s *Service) subscribeDynamicWithSyncSubnets(
|
||||
topicFormat string,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
digest [4]byte,
|
||||
) {
|
||||
genRoot := s.cfg.clock.GenesisValidatorsRoot()
|
||||
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
|
||||
// Retrieve the number of sync committee subnets we need to subscribe to.
|
||||
syncCommiteeSubnetsCount := params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
// Initialize the subscriptions map.
|
||||
subscriptions := make(map[uint64]*pubsub.Subscription, syncCommiteeSubnetsCount)
|
||||
|
||||
// Retrieve the genesis validators root.
|
||||
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()
|
||||
|
||||
// Retrieve the epoch of the fork corresponding to the digest.
|
||||
_, epoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
base := p2p.GossipTopicMappings(topicFormat, e)
|
||||
|
||||
// Retrieve the base protobuf message.
|
||||
base := p2p.GossipTopicMappings(topicFormat, epoch)
|
||||
if base == nil {
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
|
||||
}
|
||||
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
genesis := s.cfg.clock.GenesisTime()
|
||||
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
|
||||
|
||||
// Retrieve the genesis time.
|
||||
genesisTime := s.cfg.clock.GenesisTime()
|
||||
|
||||
// Define a ticker ticking every slot.
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
ticker := slots.NewSlotTicker(genesisTime, secondsPerSlot)
|
||||
|
||||
// Retrieve the current slot.
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
|
||||
// Subscribe to the sync subnets.
|
||||
s.subscribeToSyncSubnets(topicFormat, digest, genesisValidatorsRoot, genesisTime, subscriptions, currentSlot, validate, handle)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
ticker.Done()
|
||||
return
|
||||
case currentSlot := <-ticker.C():
|
||||
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
|
||||
continue
|
||||
}
|
||||
valid, err := isDigestValid(digest, genesis, genRoot)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
log.Warnf("Sync subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
||||
// Unsubscribes from all our current subnets.
|
||||
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
|
||||
isDigestValid := s.subscribeToSyncSubnets(topicFormat, digest, genesisValidatorsRoot, genesisTime, subscriptions, currentSlot, validate, handle)
|
||||
|
||||
// Stop the ticker if the digest is not valid. Likely to happen after a hard fork.
|
||||
if !isDigestValid {
|
||||
ticker.Done()
|
||||
return
|
||||
}
|
||||
|
||||
wantedSubs := s.retrieveActiveSyncSubnets(slots.ToEpoch(currentSlot))
|
||||
// Resize as appropriate.
|
||||
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)
|
||||
|
||||
// subscribe desired aggregator subnets.
|
||||
for _, idx := range wantedSubs {
|
||||
s.subscribeSyncSubnet(subscriptions, idx, digest, validate, handle)
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
ticker.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
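The rewritten loop re-runs the subnet reconciliation every slot and shuts the ticker down once the digest stops being valid (typically right after a hard fork). A minimal sketch of that lifecycle, with a plain time.Ticker standing in for the slot ticker:

package main

import (
	"fmt"
	"time"
)

// runEverySlot calls reconcile on every tick and stops as soon as reconcile
// reports that the digest is no longer valid, mirroring the loop above.
func runEverySlot(slotDuration time.Duration, reconcile func() (stillValid bool)) {
	ticker := time.NewTicker(slotDuration)
	defer ticker.Stop()

	for range ticker.C {
		if !reconcile() {
			return
		}
	}
}

func main() {
	remaining := 3
	runEverySlot(10*time.Millisecond, func() bool {
		remaining--
		fmt.Println("reconciled subnets, ticks left:", remaining)
		return remaining > 0 // pretend the digest becomes invalid after a few slots
	})
}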
@@ -682,10 +793,11 @@ func (s *Service) subscribeColumnSubnet(
|
||||
if _, exists := subscriptions[idx]; !exists {
|
||||
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
|
||||
}
|
||||
if !s.validPeersExist(subnetTopic) {
|
||||
log.Debugf("No peers found subscribed to column gossip subnet with "+
|
||||
"column index %d. Searching network for peers subscribed to the subnet.", idx)
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
|
||||
|
||||
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
|
||||
|
||||
if !s.enoughPeersAreConnected(subnetTopic) {
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, minimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
}
|
||||
@@ -711,6 +823,10 @@ func (s *Service) subscribeDynamicWithColumnSubnets(
|
||||
genesis := s.cfg.clock.GenesisTime()
|
||||
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
|
||||
|
||||
wantedSubs := s.retrieveActiveColumnSubnets()
|
||||
for _, idx := range wantedSubs {
|
||||
s.subscribeWithBase(s.addDigestAndIndexToTopic(topicFormat, digest, idx), validate, handle)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
@@ -751,9 +867,7 @@ func (s *Service) subscribeDynamicWithColumnSubnets(
|
||||
func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.Attestation{})]
|
||||
subnetTopic := fmt.Sprintf(topic, digest, idx)
|
||||
if !s.validPeersExist(subnetTopic) {
|
||||
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
|
||||
if !s.enoughPeersAreConnected(subnetTopic) {
|
||||
// perform a search for peers with the desired committee index.
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
@@ -777,10 +891,15 @@ func (s *Service) unSubscribeFromTopic(topic string) {
|
||||
}
|
||||
}
|
||||
|
||||
// find if we have peers who are subscribed to the same subnet
|
||||
func (s *Service) validPeersExist(subnetTopic string) bool {
|
||||
numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
|
||||
// enoughPeersAreConnected checks if we have enough peers subscribed to the given subnet topic.
|
||||
func (s *Service) enoughPeersAreConnected(subnetTopic string) bool {
|
||||
topic := subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
threshold := flags.Get().MinimumPeersPerSubnet
|
||||
|
||||
peersWithSubnet := s.cfg.p2p.PubSub().ListPeers(topic)
|
||||
peersWithSubnetCount := len(peersWithSubnet)
|
||||
|
||||
return peersWithSubnetCount >= threshold
|
||||
}
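The renamed helper is a plain threshold check on the peers gossiping a topic. A tiny sketch of the same check; the peer-listing callback and the minimum-peers value are placeholders for the pubsub call and CLI flag:

package main

import "fmt"

// enoughPeersAreConnected reports whether at least minimumPeersPerSubnet peers
// are subscribed to the given full topic, mirroring the helper above.
func enoughPeersAreConnected(listPeers func(topic string) []string, fullTopic string, minimumPeersPerSubnet int) bool {
	return len(listPeers(fullTopic)) >= minimumPeersPerSubnet
}

func main() {
	peersByTopic := map[string][]string{"/eth2/abcd1234/sync_committee_0/ssz_snappy": {"peerA", "peerB"}}
	list := func(topic string) []string { return peersByTopic[topic] }
	fmt.Println(enoughPeersAreConnected(list, "/eth2/abcd1234/sync_committee_0/ssz_snappy", 6)) // false
}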
func (s *Service) retrievePersistentSubs(currSlot primitives.Slot) []uint64 {
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the
|
||||
// beaconAggregateAndProofSubscriber forwards the incoming validated aggregated attestation and proof to the
|
||||
// attestation pool for processing.
|
||||
func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
func (s *Service) beaconAggregateAndProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(ethpb.SignedAggregateAttAndProof)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type ethpb.SignedAggregateAttAndProof, type=%T", msg)
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestBeaconAggregateProofSubscriber_CanSaveAggregatedAttestation(t *testing.
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
require.NoError(t, r.beaconAggregateProofSubscriber(context.Background(), a))
|
||||
require.NoError(t, r.beaconAggregateAndProofSubscriber(context.Background(), a))
|
||||
assert.DeepSSZEqual(t, []ethpb.Att{a.Message.Aggregate}, r.cfg.attPool.AggregatedAttestations(), "Did not save aggregated attestation")
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestBeaconAggregateProofSubscriber_CanSaveUnaggregatedAttestation(t *testin
|
||||
AggregatorIndex: 100,
|
||||
},
|
||||
}
|
||||
require.NoError(t, r.beaconAggregateProofSubscriber(context.Background(), a))
|
||||
require.NoError(t, r.beaconAggregateAndProofSubscriber(context.Background(), a))
|
||||
|
||||
atts, err := r.cfg.attPool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
|
||||
func (s *Service) beaconAttestationSubscriber(_ context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(eth.Att)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type eth.Att, type=%T", msg)
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// syncContributionAndProofSubscriber forwards the incoming validated sync contributions and proof to the
|
||||
// syncCommitteeContributionAndProofSubscriber forwards the incoming validated sync contribution and proof to the
|
||||
// contribution pool for processing.
|
||||
// skipcq: SCC-U1000
|
||||
func (s *Service) syncContributionAndProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
func (s *Service) syncCommitteeContributionAndProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
sContr, ok := msg.(*ethpb.SignedContributionAndProof)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type *ethpb.SignedContributionAndProof, type=%T", msg)
|
||||
|
||||
@@ -350,7 +350,7 @@ func Test_wrapAndReportValidation(t *testing.T) {
|
||||
}
|
||||
fd, err := forks.CreateForkDigest(mChain.GenesisTime(), mChain.ValidatorsRoot[:])
|
||||
assert.NoError(t, err)
|
||||
mockTopic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, fd) + encoder.SszNetworkEncoder{}.ProtocolSuffix()
|
||||
mockTopic := fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, fd) + encoder.SszNetworkEncoder{}.ProtocolSuffix()
|
||||
type args struct {
|
||||
topic string
|
||||
v wrappedVal
|
||||
|
||||
@@ -23,7 +23,7 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
enc := encoder.SszNetworkEncoder{}
|
||||
|
||||
// Valid topic added in.
|
||||
topic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic := fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
assert.Equal(t, true, h.digestExists(digest))
|
||||
@@ -36,11 +36,11 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
|
||||
h = newSubTopicHandler()
|
||||
// Multiple Topics added in.
|
||||
topic = fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.ExitSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.VoluntaryExitSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
@@ -52,11 +52,11 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.AggregateAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.BeaconAggregateAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.SyncCommitteeContributionAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.addTopic(topic, new(pubsub.Subscription))
|
||||
assert.Equal(t, true, h.topicExists(topic))
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.ExitSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.VoluntaryExitSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
@@ -83,15 +83,15 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
|
||||
assert.Equal(t, 4, len(h.allTopics()))
|
||||
|
||||
// Remove remaining topics.
|
||||
topic = fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.BeaconBlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.AggregateAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.BeaconAggregateAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
topic = fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
topic = fmt.Sprintf(p2p.SyncCommitteeContributionAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
|
||||
h.removeTopic(topic)
|
||||
assert.Equal(t, false, h.topicExists(topic))
|
||||
|
||||
|
||||
@@ -108,7 +108,7 @@ func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) {
|
||||
Topic: &strTop,
|
||||
},
|
||||
}
|
||||
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
|
||||
_, err := r.validateBeaconBlockPubSubMsg(ctx, peer.ID(pid), msg)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
@@ -190,7 +190,7 @@ func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) {
|
||||
Topic: &strTop,
|
||||
},
|
||||
}
|
||||
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
|
||||
_, err := r.validateBeaconBlockPubSubMsg(ctx, peer.ID(pid), msg)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
@@ -272,7 +272,7 @@ func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) {
|
||||
Topic: &strTop,
|
||||
},
|
||||
}
|
||||
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
|
||||
_, err := r.validateBeaconBlockPubSubMsg(ctx, peer.ID(pid), msg)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
|
||||
@@ -26,9 +26,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// validateAggregateAndProof verifies the aggregated signature and the selection proof is valid before forwarding to the
|
||||
// validateBeaconAggregateAndProofPubSubMsg verifies that the aggregated signature and the selection proof are valid before forwarding to the
|
||||
// network and downstream services.
|
||||
func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateBeaconAggregateAndProofPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
receivedTime := prysmTime.Now()
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
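One detail worth noting in the validator above: validation also runs on messages we publish ourselves, so the first check accepts anything from our own peer ID before any expensive work. A reduced sketch of that guard, with local stand-ins for the libp2p pubsub types:

package main

import "fmt"

// ValidationResult is a stand-in for pubsub.ValidationResult.
type ValidationResult int

const (
	ValidationAccept ValidationResult = iota
	ValidationIgnore
	ValidationReject
)

// validate accepts self-published messages immediately and only runs the
// expensive checks for messages received from other peers.
func validate(selfID, senderID string, fullCheck func() ValidationResult) ValidationResult {
	if senderID == selfID {
		return ValidationAccept
	}
	return fullCheck()
}

func main() {
	expensive := func() ValidationResult { return ValidationReject }
	fmt.Println(validate("self", "self", expensive))  // 0 (accept)
	fmt.Println(validate("self", "other", expensive)) // 2 (reject)
}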
@@ -185,7 +185,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
if res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
_ = err
|
||||
t.Error("Expected validate to fail")
|
||||
}
|
||||
@@ -255,7 +255,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
if res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
_ = err
|
||||
t.Error("Expected validate to fail")
|
||||
}
|
||||
@@ -272,7 +272,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
if res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
_ = err
|
||||
t.Error("Expected validate to fail")
|
||||
}
|
||||
@@ -338,7 +338,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
|
||||
}
|
||||
|
||||
require.NoError(t, r.cfg.attPool.SaveBlockAttestation(att))
|
||||
if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
if res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
_ = err
|
||||
t.Error("Expected validate to fail")
|
||||
}
|
||||
@@ -442,7 +442,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAggregateAndProof(context.Background(), "", msg)
|
||||
res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pubsub.ValidationAccept, res, "Validated status is false")
|
||||
assert.NotNil(t, msg.ValidatorData, "Did not set validator data")
|
||||
@@ -545,7 +545,7 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAggregateAndProof(context.Background(), "", msg)
|
||||
res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg)
|
||||
assert.NoError(t, err)
|
||||
require.Equal(t, pubsub.ValidationAccept, res, "Validated status is false")
|
||||
|
||||
@@ -562,7 +562,7 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers.
|
||||
if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
if res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg); res == pubsub.ValidationAccept {
|
||||
_ = err
|
||||
t.Fatal("Validated status is true")
|
||||
}
|
||||
@@ -654,7 +654,7 @@ func TestValidateAggregateAndProof_BadBlock(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAggregateAndProof(context.Background(), "", msg)
|
||||
res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg)
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, pubsub.ValidationReject, res, "Validated status is true")
|
||||
}
|
||||
@@ -744,7 +744,7 @@ func TestValidateAggregateAndProof_RejectWhenAttEpochDoesntEqualTargetEpoch(t *t
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAggregateAndProof(context.Background(), "", msg)
|
||||
res, err := r.validateBeaconAggregateAndProofPubSubMsg(context.Background(), "", msg)
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, pubsub.ValidationReject, res)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
|
||||
// Clients who receive an attester slashing on this topic MUST validate the conditions within VerifyAttesterSlashing before
|
||||
// forwarding it across the network.
|
||||
func (s *Service) validateAttesterSlashing(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateAttesterSlashingPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Validation runs on publish (not just subscriptions), so we should approve any message from
|
||||
// ourselves.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err := r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationAccept
|
||||
|
||||
@@ -153,7 +153,7 @@ func TestValidateAttesterSlashing_ValidOldSlashing(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err := r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
assert.ErrorContains(t, "validators were previously slashed", err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
|
||||
@@ -198,7 +198,7 @@ func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err := r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationAccept
|
||||
|
||||
@@ -211,7 +211,7 @@ func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T
|
||||
}
|
||||
|
||||
require.NoError(t, s.SetValidators(vals))
|
||||
res, err = r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err = r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
assert.ErrorContains(t, "none of the validators are slashable", err)
|
||||
invalid := res == pubsub.ValidationReject
|
||||
|
||||
@@ -257,7 +257,7 @@ func TestValidateAttesterSlashing_CanFilter(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err := r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
_ = err
|
||||
ignored := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, ignored)
|
||||
@@ -278,7 +278,7 @@ func TestValidateAttesterSlashing_CanFilter(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err = r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err = r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
_ = err
|
||||
ignored = res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, ignored)
|
||||
@@ -315,7 +315,7 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err := r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
_ = err
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "slashing from the far distant future should have timed out and returned false")
|
||||
@@ -346,7 +346,7 @@ func TestValidateAttesterSlashing_Syncing(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
res, err := r.validateAttesterSlashingPubSubMsg(ctx, "foobar", msg)
|
||||
_ = err
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "Passed validation")
|
||||
|
||||
@@ -35,7 +35,7 @@ import (
|
||||
// - The attestation is unaggregated -- that is, it has exactly one participating validator (len(get_attesting_indices(state, attestation.data, attestation.aggregation_bits)) == 1).
|
||||
// - attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot).
|
||||
// - The signature of attestation is valid.
|
||||
func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateBeaconAttestationPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
@@ -290,7 +290,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
m.Message.Topic = nil
|
||||
}
|
||||
|
||||
res, err := s.validateCommitteeIndexBeaconAttestation(ctx, "", m)
|
||||
res, err := s.validateBeaconAttestationPubSubMsg(ctx, "", m)
|
||||
received := res == pubsub.ValidationAccept
|
||||
if received != tt.want {
|
||||
t.Fatalf("Did not received wanted validation. Got %v, wanted %v", !tt.want, tt.want)
|
||||
|
||||
@@ -35,10 +35,10 @@ var (
|
||||
errRejectCommitmentLen = errors.New("[REJECT] The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer")
|
||||
)
|
||||
|
||||
// validateBeaconBlockPubSub checks that the incoming block has a valid BLS signature.
|
||||
// validateBeaconBlockPubSubMsg checks that the incoming block has a valid BLS signature.
|
||||
// Blocks that have already been seen are ignored. If the BLS signature is any valid signature,
|
||||
// this method rebroadcasts the message.
|
||||
func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateBeaconBlockPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
receivedTime := prysmTime.Now()
|
||||
// Validation runs on publish (not just subscriptions), so we should approve any message from
|
||||
// ourselves.
|
||||
|
||||
@@ -101,7 +101,7 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.ErrorIs(t, err, signing.ErrSigFailedToVerify)
|
||||
result := res == pubsub.ValidationReject
|
||||
assert.Equal(t, true, result)
|
||||
@@ -145,7 +145,7 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "block present in DB should be ignored")
|
||||
}
|
||||
@@ -208,7 +208,7 @@ func TestValidateBeaconBlockPubSub_CanRecoverStateSummary(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
@@ -274,7 +274,7 @@ func TestValidateBeaconBlockPubSub_IsInCache(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
@@ -340,7 +340,7 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
@@ -409,7 +409,7 @@ func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
@@ -477,7 +477,7 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
@@ -520,7 +520,7 @@ func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "block is ignored until fully synced")
|
||||
}
|
||||
@@ -586,7 +586,7 @@ func TestValidateBeaconBlockPubSub_IgnoreAndQueueBlocksFromNearFuture(t *testing
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.ErrorContains(t, "early block, with current slot", err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "early block should be ignored and queued")
|
||||
|
||||
@@ -637,7 +637,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "block from the future should be ignored")
|
||||
}
|
||||
@@ -688,7 +688,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.ErrorContains(t, "greater or equal to block slot", err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "block from the past should be ignored")
|
||||
}
|
||||
@@ -750,7 +750,7 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
|
||||
}
|
||||
r.setSeenBlockIndexSlot(msg.Block.Slot, msg.Block.ProposerIndex)
|
||||
time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers.
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "seen proposer block should be ignored")
|
||||
}
|
||||
@@ -801,7 +801,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
res, err := r.validateBeaconBlockPubSub(context.Background(), "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(context.Background(), "", m)
|
||||
_ = err
|
||||
assert.Equal(t, pubsub.ValidationIgnore, res)
|
||||
|
||||
@@ -817,7 +817,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
res, err = r.validateBeaconBlockPubSub(context.Background(), "", m)
|
||||
res, err = r.validateBeaconBlockPubSubMsg(context.Background(), "", m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pubsub.ValidationIgnore, res)
|
||||
}
|
||||
@@ -884,7 +884,7 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.Equal(t, pubsub.ValidationReject, res, "Wrong validation result returned")
|
||||
require.ErrorContains(t, "not descendant of finalized checkpoint", err)
|
||||
}
|
||||
@@ -950,7 +950,7 @@ func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.ErrorContains(t, "could not unmarshal bytes into signature", err)
|
||||
assert.Equal(t, res, pubsub.ValidationReject, "block with invalid signature should be rejected")
|
||||
|
||||
@@ -982,7 +982,7 @@ func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) {
|
||||
r.cfg.chain = chainService
|
||||
r.cfg.clock = startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
|
||||
|
||||
res, err = r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err = r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.ErrorContains(t, "has an invalid parent", err)
|
||||
// Expect block with bad parent to fail too
|
||||
assert.Equal(t, res, pubsub.ValidationReject, "block with invalid parent should be ignored")
|
||||
@@ -1044,7 +1044,7 @@ func TestValidateBeaconBlockPubSub_InsertValidPendingBlock(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.ErrorContains(t, "unknown parent for block", err)
|
||||
assert.Equal(t, res, pubsub.ValidationIgnore, "block with unknown parent should be ignored")
|
||||
bRoot, err = msg.Block.HashTreeRoot()
|
||||
@@ -1129,7 +1129,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromBadParent(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
assert.ErrorContains(t, "invalid parent", err)
|
||||
assert.Equal(t, res, pubsub.ValidationReject)
|
||||
}
|
||||
@@ -1230,7 +1230,7 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
require.Equal(t, true, result)
|
||||
@@ -1301,7 +1301,7 @@ func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.NotNil(t, err)
|
||||
result := res == pubsub.ValidationReject
|
||||
assert.Equal(t, true, result)
|
||||
@@ -1462,7 +1462,7 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
res, err := r.validateBeaconBlockPubSubMsg(ctx, "", m)
|
||||
require.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
|
||||
@@ -169,16 +169,6 @@ func blobFields(b blocks.ROBlob) logrus.Fields {
|
||||
}
|
||||
}
|
||||
|
||||
func columnFields(b blocks.RODataColumn) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": b.Slot(),
|
||||
"proposerIndex": b.ProposerIndex(),
|
||||
"blockRoot": fmt.Sprintf("%#x", b.BlockRoot()),
|
||||
"kzgCommitments": fmt.Sprintf("%#x", b.KzgCommitments),
|
||||
"columnIndex": b.ColumnIndex,
|
||||
}
|
||||
}
|
||||
|
||||
func computeSubnetForBlobSidecar(index uint64) uint64 {
|
||||
return index % params.BeaconConfig().BlobsidecarSubnetCount
|
||||
}
|
||||
|
||||
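The subnet computation just above is a plain modulo over the configured blob-sidecar subnet count. A quick hedged illustration of the arithmetic (the subnet count of 6 is assumed here for the example, not read from the real config):

```go
// Sketch: same arithmetic as computeSubnetForBlobSidecar, with the subnet
// count passed explicitly so the mapping is easy to trace.
package main

import "fmt"

func blobSubnet(index, subnetCount uint64) uint64 {
	return index % subnetCount
}

func main() {
	const assumedSubnetCount = 6 // illustrative value only
	for _, idx := range []uint64{0, 1, 5, 6, 9} {
		fmt.Printf("blob index %d -> subnet %d\n", idx, blobSubnet(idx, assumedSubnetCount))
	}
	// blob index 6 -> subnet 0, blob index 9 -> subnet 3, etc.
}
```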
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
@@ -51,13 +52,15 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
ds, err := blocks.NewRODataColumn(dspb)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, errors.Wrap(err, "roDataColumn conversion failure")
|
||||
}
|
||||
vf := s.newColumnVerifier(ds, verification.GossipColumnSidecarRequirements)
|
||||
|
||||
if err := vf.DataColumnIndexInBounds(); err != nil {
|
||||
verifier := s.newColumnVerifier(ds, verification.GossipColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.DataColumnIndexInBounds(); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
@@ -68,7 +71,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic)
|
||||
}
|
||||
|
||||
if err := vf.NotFromFutureSlot(); err != nil {
|
||||
if err := verifier.NotFromFutureSlot(); err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
@@ -77,40 +80,40 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
if err := vf.SlotAboveFinalized(); err != nil {
|
||||
if err := verifier.SlotAboveFinalized(); err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if err := vf.SidecarParentSeen(s.hasBadBlock); err != nil {
|
||||
if err := verifier.SidecarParentSeen(s.hasBadBlock); err != nil {
|
||||
go func() {
|
||||
if err := s.sendBatchRootRequest(context.Background(), [][32]byte{ds.ParentRoot()}, rand.NewGenerator()); err != nil {
|
||||
log.WithError(err).WithFields(columnFields(ds)).Debug("Failed to send batch root request")
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(ds)).Debug("Failed to send batch root request")
|
||||
}
|
||||
}()
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
if err := vf.SidecarParentValid(s.hasBadBlock); err != nil {
|
||||
if err := verifier.SidecarParentValid(s.hasBadBlock); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
if err := vf.SidecarParentSlotLower(); err != nil {
|
||||
if err := verifier.SidecarParentSlotLower(); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
if err := vf.SidecarDescendsFromFinalized(); err != nil {
|
||||
if err := verifier.SidecarDescendsFromFinalized(); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
if err := vf.SidecarInclusionProven(); err != nil {
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
if err := vf.SidecarKzgProofVerified(); err != nil {
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if err := vf.ValidProposerSignature(ctx); err != nil {
|
||||
if err := verifier.ValidProposerSignature(ctx); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if err := vf.SidecarProposerExpected(ctx); err != nil {
|
||||
if err := verifier.SidecarProposerExpected(ctx); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
@@ -120,19 +123,20 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
fields := columnFields(ds)
|
||||
sinceSlotStartTime := receivedTime.Sub(startTime)
|
||||
validationTime := s.cfg.clock.Now().Sub(receivedTime)
|
||||
fields["sinceSlotStartTime"] = sinceSlotStartTime
|
||||
fields["validationTime"] = validationTime
|
||||
log.WithFields(fields).Debug("Received data column sidecar gossip")
|
||||
|
||||
verifiedRODataColumn, err := vf.VerifiedRODataColumn()
|
||||
verifiedRODataColumn, err := verifier.VerifiedRODataColumn()
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
msg.ValidatorData = verifiedRODataColumn
|
||||
|
||||
fields := logging.DataColumnFields(ds)
|
||||
sinceSlotStartTime := receivedTime.Sub(startTime)
|
||||
validationTime := s.cfg.clock.Now().Sub(receivedTime)
|
||||
fields["sinceSlotStartTime"] = sinceSlotStartTime
|
||||
fields["validationTime"] = validationTime
|
||||
|
||||
log.WithFields(fields).Debug("Accepted data column sidecar gossip")
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
|
||||
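The validateDataColumn flow above applies its verifier checks in a fixed order, answering ValidationIgnore for conditions that may clear up later (future slot, unseen parent, node still syncing) and ValidationReject for sidecars that are provably invalid (bad inclusion proof, bad KZG proof, bad signature, unexpected proposer). A compact, hedged sketch of that ignore-versus-reject split; the check names and types here are placeholders, not the actual verification API:

```go
// Illustrative only: runs a list of named checks and returns the result
// class associated with the first failure, mirroring the gossip validator's
// ordering of IGNORE (transient) versus REJECT (invalid) outcomes.
package main

import (
	"errors"
	"fmt"
)

type result string

const (
	accept result = "ACCEPT"
	ignore result = "IGNORE" // transient: may become valid later
	reject result = "REJECT" // invalid: drop and penalize
)

type check struct {
	name      string
	onFailure result
	run       func() error
}

func validate(checks []check) (result, error) {
	for _, c := range checks {
		if err := c.run(); err != nil {
			return c.onFailure, fmt.Errorf("%s: %w", c.name, err)
		}
	}
	return accept, nil
}

func main() {
	res, err := validate([]check{
		{"NotFromFutureSlot", ignore, func() error { return nil }},
		{"SidecarParentSeen", ignore, func() error { return nil }},
		{"SidecarKzgProofVerified", reject, func() error { return errors.New("bad proof") }},
	})
	fmt.Println(res, err) // REJECT SidecarKzgProofVerified: bad proof
}
```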
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
// Clients who receive a proposer slashing on this topic MUST validate the conditions within VerifyProposerSlashing before
|
||||
// forwarding it across the network.
|
||||
func (s *Service) validateProposerSlashing(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateProposerSlashingPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Validation runs on publish (not just subscriptions), so we should approve any message from
|
||||
// ourselves.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
|
||||
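The early `pid == s.cfg.p2p.PeerID()` return above reflects that libp2p runs validators on locally published messages as well as on subscriptions, so a node accepts its own traffic without re-running the slashing checks. A minimal hedged illustration (plain strings stand in for libp2p peer IDs):

```go
// Sketch of the self-origin short-circuit used by the pubsub validators.
package main

import "fmt"

func needsValidation(senderID, selfID string) bool {
	// Messages we published ourselves are accepted as-is; everything
	// else goes through the full validation pipeline.
	return senderID != selfID
}

func main() {
	self := "peer-self"
	fmt.Println(needsValidation(self, self))     // false
	fmt.Println(needsValidation("peer-a", self)) // true
}
```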
@@ -140,7 +140,7 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
res, err := r.validateProposerSlashing(ctx, "", m)
|
||||
res, err := r.validateProposerSlashingPubSubMsg(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, valid, "Failed validation")
|
||||
@@ -183,7 +183,7 @@ func TestValidateProposerSlashing_ValidOldSlashing(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
res, err := r.validateProposerSlashing(ctx, "", m)
|
||||
res, err := r.validateProposerSlashingPubSubMsg(ctx, "", m)
|
||||
assert.ErrorContains(t, "proposer is already slashed", err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Failed validation")
|
||||
@@ -220,7 +220,7 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateProposerSlashing(ctx, "", m)
|
||||
res, err := r.validateProposerSlashingPubSubMsg(ctx, "", m)
|
||||
assert.NotNil(t, err)
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "Slashing from the far distant future should have timed out and returned false")
|
||||
@@ -250,7 +250,7 @@ func TestValidateProposerSlashing_Syncing(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateProposerSlashing(ctx, "", m)
|
||||
res, err := r.validateProposerSlashingPubSubMsg(ctx, "", m)
|
||||
_ = err
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "Did not fail validation")
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// validateSyncContributionAndProof verifies the aggregated signature and the selection proof is valid before forwarding to the
|
||||
// validateSyncCommitteeContributionAndProofPubSubMsg verifies the aggregated signature and the selection proof is valid before forwarding to the
|
||||
// network and downstream services.
|
||||
// Gossip Validation Conditions:
|
||||
// [IGNORE] The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. contribution.slot == current_slot.
|
||||
@@ -38,7 +38,7 @@ import (
|
||||
// [REJECT] The aggregator signature, signed_contribution_and_proof.signature, is valid.
|
||||
// [REJECT] The aggregate signature is valid for the message beacon_block_root and aggregate pubkey derived from the participation
|
||||
// info in aggregation_bits for the subcommittee specified by the contribution.subcommittee_index.
|
||||
func (s *Service) validateSyncContributionAndProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateSyncCommitteeContributionAndProofPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateSyncContributionAndProof")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ import (
|
||||
func TestService_ValidateSyncContributionAndProof(t *testing.T) {
|
||||
database := testingdb.SetupDB(t)
|
||||
headRoot, keys := fillUpBlocksAndState(context.Background(), t, database)
|
||||
defaultTopic := p2p.SyncContributionAndProofSubnetTopicFormat
|
||||
defaultTopic := p2p.SyncCommitteeContributionAndProofSubnetTopicFormat
|
||||
defaultTopic = fmt.Sprintf(defaultTopic, []byte{0xAB, 0x00, 0xCC, 0x9E})
|
||||
defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
chainService := &mockChain.ChainService{
|
||||
@@ -878,7 +878,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) {
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, svc.chainIsStarted())
|
||||
if got, err := svc.validateSyncContributionAndProof(ctx, tt.args.pid, msg); got != tt.want {
|
||||
if got, err := svc.validateSyncCommitteeContributionAndProofPubSubMsg(ctx, tt.args.pid, msg); got != tt.want {
|
||||
_ = err
|
||||
t.Errorf("validateSyncContributionAndProof() = %v, want %v", got, tt.want)
|
||||
}
|
||||
@@ -890,7 +890,7 @@ func TestValidateSyncContributionAndProof(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
database := testingdb.SetupDB(t)
|
||||
headRoot, keys := fillUpBlocksAndState(ctx, t, database)
|
||||
defaultTopic := p2p.SyncContributionAndProofSubnetTopicFormat
|
||||
defaultTopic := p2p.SyncCommitteeContributionAndProofSubnetTopicFormat
|
||||
defaultTopic = fmt.Sprintf(defaultTopic, []byte{0xAB, 0x00, 0xCC, 0x9E})
|
||||
defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
var emptySig [96]byte
|
||||
@@ -1004,7 +1004,7 @@ func TestValidateSyncContributionAndProof(t *testing.T) {
|
||||
opSub := s.cfg.operationNotifier.OperationFeed().Subscribe(opChannel)
|
||||
defer opSub.Unsubscribe()
|
||||
|
||||
_, err = s.validateSyncContributionAndProof(ctx, pid, pubsubMsg)
|
||||
_, err = s.validateSyncCommitteeContributionAndProofPubSubMsg(ctx, pid, pubsubMsg)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Ensure the state notification was broadcast.
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
// Clients who receive a voluntary exit on this topic MUST validate the conditions within process_voluntary_exit before
|
||||
// forwarding it across the network.
|
||||
func (s *Service) validateVoluntaryExit(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
func (s *Service) validateVoluntaryExitPubSubMsg(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Validation runs on publish (not just subscriptions), so we should approve any message from
|
||||
// ourselves.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
|
||||
@@ -118,7 +118,7 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) {
|
||||
opSub := r.cfg.operationNotifier.OperationFeed().Subscribe(opChannel)
|
||||
defer opSub.Unsubscribe()
|
||||
|
||||
res, err := r.validateVoluntaryExit(ctx, "", m)
|
||||
res, err := r.validateVoluntaryExitPubSubMsg(ctx, "", m)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, pubsub.ValidationAccept, res, "Failed validation")
|
||||
require.NotNil(t, m.ValidatorData, "Decoded message was not set on the message validator data")
|
||||
@@ -166,7 +166,7 @@ func TestValidateVoluntaryExit_InvalidExitSlot(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateVoluntaryExit(ctx, "", m)
|
||||
res, err := r.validateVoluntaryExitPubSubMsg(ctx, "", m)
|
||||
_ = err
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "passed validation")
|
||||
@@ -197,7 +197,7 @@ func TestValidateVoluntaryExit_ValidExit_Syncing(t *testing.T) {
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateVoluntaryExit(ctx, "", m)
|
||||
res, err := r.validateVoluntaryExitPubSubMsg(ctx, "", m)
|
||||
_ = err
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "Validation should have failed")
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
"fake.go",
|
||||
"initializer.go",
|
||||
"interface.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"mock.go",
|
||||
"result.go",
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -50,8 +50,8 @@ type SignatureData struct {
|
||||
Slot primitives.Slot
|
||||
}
|
||||
|
||||
func (d SignatureData) logFields() log.Fields {
|
||||
return log.Fields{
|
||||
func (d SignatureData) logFields() logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", d.Root),
|
||||
"parentRoot": fmt.Sprintf("%#x", d.Parent),
|
||||
"signature": fmt.Sprintf("%#x", d.Signature),
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/logging"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var allColumnSidecarRequirements = []Requirement{
|
||||
@@ -135,7 +134,7 @@ func (dv *RODataColumnVerifier) NotFromFutureSlot() (err error) {
|
||||
earliestStart := dv.clock.SlotStart(dv.dataColumn.Slot()).Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
|
||||
// If the system time is still before earliestStart, we consider the column from a future slot and return an error.
|
||||
if dv.clock.Now().Before(earliestStart) {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("sidecar slot is too far in the future")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar slot is too far in the future")
|
||||
return columnErrBuilder(ErrFromFutureSlot)
|
||||
}
|
||||
return nil
|
||||
@@ -152,7 +151,7 @@ func (dv *RODataColumnVerifier) SlotAboveFinalized() (err error) {
|
||||
return errors.Wrapf(columnErrBuilder(ErrSlotNotAfterFinalized), "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
|
||||
}
|
||||
if dv.dataColumn.Slot() <= fSlot {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("sidecar slot is not after finalized checkpoint")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar slot is not after finalized checkpoint")
|
||||
return columnErrBuilder(ErrSlotNotAfterFinalized)
|
||||
}
|
||||
return nil
|
||||
@@ -168,7 +167,7 @@ func (dv *RODataColumnVerifier) ValidProposerSignature(ctx context.Context) (err
|
||||
if seen {
|
||||
columnVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
|
||||
if err != nil {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("reusing failed proposer signature validation from cache")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("Reusing failed proposer signature validation from cache")
|
||||
blobVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
|
||||
return columnErrBuilder(ErrInvalidProposerSignature)
|
||||
}
|
||||
@@ -179,12 +178,12 @@ func (dv *RODataColumnVerifier) ValidProposerSignature(ctx context.Context) (err
|
||||
// Retrieve the parent state to fallback to full verification.
|
||||
parent, err := dv.parentState(ctx)
|
||||
if err != nil {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("could not replay parent state for column signature verification")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("Could not replay parent state for column signature verification")
|
||||
return columnErrBuilder(ErrInvalidProposerSignature)
|
||||
}
|
||||
// Full verification, which will subsequently be cached for anything sharing the signature cache.
|
||||
if err = dv.sc.VerifySignature(sd, parent); err != nil {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("signature verification failed")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).WithError(err).Debug("Signature verification failed")
|
||||
return columnErrBuilder(ErrInvalidProposerSignature)
|
||||
}
|
||||
return nil
|
||||
@@ -201,7 +200,7 @@ func (dv *RODataColumnVerifier) SidecarParentSeen(parentSeen func([32]byte) bool
|
||||
if dv.fc.HasNode(dv.dataColumn.ParentRoot()) {
|
||||
return nil
|
||||
}
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("parent root has not been seen")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Parent root has not been seen")
|
||||
return columnErrBuilder(ErrSidecarParentNotSeen)
|
||||
}
|
||||
|
||||
@@ -210,7 +209,7 @@ func (dv *RODataColumnVerifier) SidecarParentSeen(parentSeen func([32]byte) bool
|
||||
func (dv *RODataColumnVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) {
|
||||
defer dv.recordResult(RequireSidecarParentValid, &err)
|
||||
if badParent != nil && badParent(dv.dataColumn.ParentRoot()) {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("parent root is invalid")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Parent root is invalid")
|
||||
return columnErrBuilder(ErrSidecarParentInvalid)
|
||||
}
|
||||
return nil
|
||||
@@ -236,7 +235,7 @@ func (dv *RODataColumnVerifier) SidecarParentSlotLower() (err error) {
|
||||
func (dv *RODataColumnVerifier) SidecarDescendsFromFinalized() (err error) {
|
||||
defer dv.recordResult(RequireSidecarDescendsFromFinalized, &err)
|
||||
if !dv.fc.HasNode(dv.dataColumn.ParentRoot()) {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("parent root not in forkchoice")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Parent root not in forkchoice")
|
||||
return columnErrBuilder(ErrSidecarNotFinalizedDescendent)
|
||||
}
|
||||
return nil
|
||||
@@ -247,7 +246,7 @@ func (dv *RODataColumnVerifier) SidecarDescendsFromFinalized() (err error) {
|
||||
func (dv *RODataColumnVerifier) SidecarInclusionProven() (err error) {
|
||||
defer dv.recordResult(RequireSidecarInclusionProven, &err)
|
||||
if err = blocks.VerifyKZGInclusionProofColumn(dv.dataColumn); err != nil {
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("sidecar inclusion proof verification failed")
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Sidecar inclusion proof verification failed")
|
||||
return columnErrBuilder(ErrSidecarInclusionProofInvalid)
|
||||
}
|
||||
return nil
|
||||
@@ -259,11 +258,11 @@ func (dv *RODataColumnVerifier) SidecarKzgProofVerified() (err error) {
|
||||
defer dv.recordResult(RequireSidecarKzgProofVerified, &err)
|
||||
ok, err := dv.verifyDataColumnCommitment(dv.dataColumn)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("kzg commitment proof verification failed")
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("KZG commitment proof verification failed")
|
||||
return columnErrBuilder(ErrSidecarKzgProofInvalid)
|
||||
}
|
||||
if !ok {
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("kzg commitment proof verification failed")
|
||||
log.WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("KZG commitment proof verification failed")
|
||||
return columnErrBuilder(ErrSidecarKzgProofInvalid)
|
||||
}
|
||||
return nil
|
||||
@@ -289,12 +288,12 @@ func (dv *RODataColumnVerifier) SidecarProposerExpected(ctx context.Context) (er
|
||||
if !cached {
|
||||
pst, err := dv.parentState(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("state replay to parent_root failed")
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("State replay to parent_root failed")
|
||||
return columnErrBuilder(ErrSidecarUnexpectedProposer)
|
||||
}
|
||||
idx, err = dv.pc.ComputeProposer(ctx, dv.dataColumn.ParentRoot(), dv.dataColumn.Slot(), pst)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("error computing proposer index from parent state")
|
||||
log.WithError(err).WithFields(logging.DataColumnFields(dv.dataColumn)).Debug("Error computing proposer index from parent state")
|
||||
return columnErrBuilder(ErrSidecarUnexpectedProposer)
|
||||
}
|
||||
}
|
||||
|
||||
beacon-chain/verification/log.go (new file, +5 lines)
@@ -0,0 +1,5 @@
|
||||
package verification
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "verification")
|
||||
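This new file gives the verification package a single prefixed logger, so every Debug line in the verifier hunks below carries a `prefix=verification` field. A hedged standalone sketch of how those calls resolve (the output shape is approximate):

```go
// Minimal reproduction of the package-level prefixed logger.
package main

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "verification")

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	log.WithFields(logrus.Fields{"slot": 42, "colIdx": 7}).
		Debug("Sidecar slot is not after finalized checkpoint")
	// Roughly: level=debug msg="Sidecar slot is not after finalized checkpoint"
	//          colIdx=7 prefix=verification slot=42
}
```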
@@ -169,7 +169,7 @@ func before(ctx *cli.Context) error {
|
||||
switch format {
|
||||
case "text":
|
||||
formatter := new(prefixed.TextFormatter)
|
||||
formatter.TimestampFormat = "2006-01-02 15:04:05"
|
||||
formatter.TimestampFormat = "2006-01-02 15:04:05.00"
|
||||
formatter.FullTimestamp = true
|
||||
|
||||
// If persistent log files are written - we disable the log messages coloring because
|
||||
@@ -185,7 +185,9 @@ func before(ctx *cli.Context) error {
|
||||
|
||||
logrus.SetFormatter(f)
|
||||
case "json":
|
||||
logrus.SetFormatter(&logrus.JSONFormatter{})
|
||||
logrus.SetFormatter(&logrus.JSONFormatter{
|
||||
TimestampFormat: "2006-01-02 15:04:05.00",
|
||||
})
|
||||
case "journald":
|
||||
if err := journald.Enable(); err != nil {
|
||||
return err
|
||||
|
||||
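Both the text and JSON formatters now share the higher-resolution timestamp layout, adding hundredths of a second. A hedged, standalone sketch of the JSON path using the same logrus options (the log message is made up):

```go
// Reproduces the JSON formatter configuration with the new timestamp layout.
package main

import log "github.com/sirupsen/logrus"

func main() {
	log.SetFormatter(&log.JSONFormatter{
		TimestampFormat: "2006-01-02 15:04:05.00",
	})
	// Emits something like:
	// {"level":"info","msg":"example","time":"2025-01-02 10:04:05.12"}
	log.Info("example")
}
```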
@@ -83,7 +83,7 @@ type Flags struct {
|
||||
KeystoreImportDebounceInterval time.Duration
|
||||
|
||||
// DataColumnsWithholdCount specifies the number of data column sidecars to withhold when proposing a block. Test-only; do not use in production.
|
||||
DataColumnsWithholdCount int
|
||||
DataColumnsWithholdCount uint64
|
||||
|
||||
// AggregateIntervals specifies the time durations at which we aggregate attestations preparing for forkchoice.
|
||||
AggregateIntervals [3]time.Duration
|
||||
@@ -145,6 +145,7 @@ func configureTestnet(ctx *cli.Context) error {
|
||||
} else {
|
||||
if ctx.IsSet(cmd.ChainConfigFileFlag.Name) {
|
||||
log.Warn("Running on custom Ethereum network specified in a chain configuration yaml file")
|
||||
params.UseCustomNetworkConfig()
|
||||
} else {
|
||||
log.Info("Running on Ethereum Mainnet")
|
||||
}
|
||||
@@ -156,11 +157,11 @@ func configureTestnet(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
// Insert feature flags within the function to be enabled for Sepolia testnet.
|
||||
func applySepoliaFeatureFlags(ctx *cli.Context) {
|
||||
func applySepoliaFeatureFlags(_ *cli.Context) {
|
||||
}
|
||||
|
||||
// Insert feature flags within the function to be enabled for Holesky testnet.
|
||||
func applyHoleskyFeatureFlags(ctx *cli.Context) {
|
||||
func applyHoleskyFeatureFlags(_ *cli.Context) {
|
||||
}
|
||||
|
||||
// ConfigureBeaconChain sets the global config based
|
||||
@@ -265,7 +266,7 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
|
||||
if ctx.IsSet(DataColumnsWithholdCount.Name) {
|
||||
logEnabled(DataColumnsWithholdCount)
|
||||
cfg.DataColumnsWithholdCount = ctx.Int(DataColumnsWithholdCount.Name)
|
||||
cfg.DataColumnsWithholdCount = ctx.Uint64(DataColumnsWithholdCount.Name)
|
||||
}
|
||||
|
||||
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
|
||||
|
||||
@@ -176,7 +176,7 @@ var (
|
||||
Usage: "Enables Prysm to run with the experimental peer data availability sampling scheme.",
|
||||
}
|
||||
// DataColumnsWithholdCount is a flag for withholding data columns when proposing a block.
|
||||
DataColumnsWithholdCount = &cli.IntFlag{
|
||||
DataColumnsWithholdCount = &cli.Uint64Flag{
|
||||
Name: "data-columns-withhold-count",
|
||||
Usage: "Number of columns to withhold when proposing a block. DO NOT USE IN PRODUCTION.",
|
||||
Value: 0,
|
||||
|
||||
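Moving the flag to cli.Uint64Flag keeps the CLI value, the ctx.Uint64 accessor, and the uint64 Flags field in one unsigned type, so negative withhold counts are impossible and no conversion is needed. A hedged, self-contained sketch of the same wiring with urfave/cli v2 (the flag name and usage mirror the diff; the surrounding app is illustrative):

```go
// Sketch: an unsigned CLI flag feeding an unsigned config field, as with
// data-columns-withhold-count above. Invoke e.g.:
//   go run . --data-columns-withhold-count=2
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

type flags struct {
	DataColumnsWithholdCount uint64
}

var withholdFlag = &cli.Uint64Flag{
	Name:  "data-columns-withhold-count",
	Usage: "Number of columns to withhold when proposing a block. DO NOT USE IN PRODUCTION.",
	Value: 0,
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{withholdFlag},
		Action: func(ctx *cli.Context) error {
			cfg := flags{}
			if ctx.IsSet(withholdFlag.Name) {
				cfg.DataColumnsWithholdCount = ctx.Uint64(withholdFlag.Name)
			}
			fmt.Println("withhold count:", cfg.DataColumnsWithholdCount)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```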
@@ -14,6 +14,7 @@ go_library(
|
||||
"mainnet_config.go",
|
||||
"minimal_config.go",
|
||||
"network_config.go",
|
||||
"testnet_custom_network_config.go",
|
||||
"testnet_e2e_config.go",
|
||||
"testnet_holesky_config.go",
|
||||
"testnet_sepolia_config.go",
|
||||
|
||||
config/params/testnet_custom_network_config.go (new file, +9 lines)
@@ -0,0 +1,9 @@
|
||||
package params
|
||||
|
||||
func UseCustomNetworkConfig() {
|
||||
cfg := BeaconNetworkConfig().Copy()
|
||||
cfg.ContractDeploymentBlock = 0
|
||||
cfg.BootstrapNodes = []string{}
|
||||
|
||||
OverrideBeaconNetworkConfig(cfg)
|
||||
}
|
||||
@@ -6,7 +6,6 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
|
||||
@@ -2,7 +2,6 @@ package wrapper
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
@@ -38,7 +37,7 @@ func (m MetadataV0) SyncnetsBitfield() bitfield.Bitvector4 {
|
||||
}
|
||||
|
||||
// CustodySubnetCount returns custody subnet count from the metadata.
|
||||
func (m MetadataV0) CustodySubnetCount() uint8 {
|
||||
func (m MetadataV0) CustodySubnetCount() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
@@ -132,7 +131,7 @@ func (m MetadataV1) SyncnetsBitfield() bitfield.Bitvector4 {
|
||||
}
|
||||
|
||||
// CustodySubnetCount returns custody subnet count from the metadata.
|
||||
func (m MetadataV1) CustodySubnetCount() uint8 {
|
||||
func (m MetadataV1) CustodySubnetCount() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
@@ -226,8 +225,8 @@ func (m MetadataV2) SyncnetsBitfield() bitfield.Bitvector4 {
|
||||
}
|
||||
|
||||
// CustodySubnetCount returns custody subnet count from the metadata.
|
||||
func (m MetadataV2) CustodySubnetCount() uint8 {
|
||||
return bytesutil.FromBytes1(m.md.CustodySubnetCount)
|
||||
func (m MetadataV2) CustodySubnetCount() uint64 {
|
||||
return m.md.CustodySubnetCount
|
||||
}
|
||||
|
||||
// InnerObject returns the underlying metadata protobuf structure.
|
||||
|
||||
@@ -66,14 +66,6 @@ func Bytes32(x uint64) []byte {
|
||||
return bytes
|
||||
}
|
||||
|
||||
// FromBytes1 returns an integer from a byte slice with a size of 1.
|
||||
func FromBytes1(x []byte) uint8 {
|
||||
if len(x) < 1 {
|
||||
return 0
|
||||
}
|
||||
return x[0]
|
||||
}
|
||||
|
||||
// FromBytes2 returns an integer which is stored in the little-endian format(2, 'little')
|
||||
// from a byte array.
|
||||
func FromBytes2(x []byte) uint16 {
|
||||
|
||||
@@ -11,7 +11,7 @@ type Metadata interface {
|
||||
SequenceNumber() uint64
|
||||
AttnetsBitfield() bitfield.Bitvector64
|
||||
SyncnetsBitfield() bitfield.Bitvector4
|
||||
CustodySubnetCount() uint8
|
||||
CustodySubnetCount() uint64
|
||||
InnerObject() interface{}
|
||||
IsNil() bool
|
||||
Copy() Metadata
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Code generated by fastssz. DO NOT EDIT.
|
||||
// Hash: 2b6089e02fcaeb5244f5bd8568b0d90eca6cccd827a21f19affac4a07bb8d355
|
||||
// Hash: ac6fbb6f912d4e5a3374b91a3dc73c1d5ac1e5b4a9c32513b3d5ef9de7885be8
|
||||
package eth
|
||||
|
||||
import (
|
||||
@@ -578,11 +578,7 @@ func (m *MetaDataV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
dst = append(dst, m.Syncnets...)
|
||||
|
||||
// Field (3) 'CustodySubnetCount'
|
||||
if size := len(m.CustodySubnetCount); size != 1 {
|
||||
err = ssz.ErrBytesLengthFn("--.CustodySubnetCount", size, 1)
|
||||
return
|
||||
}
|
||||
dst = append(dst, m.CustodySubnetCount...)
|
||||
dst = ssz.MarshalUint64(dst, m.CustodySubnetCount)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -591,7 +587,7 @@ func (m *MetaDataV2) MarshalSSZTo(buf []byte) (dst []byte, err error) {
|
||||
func (m *MetaDataV2) UnmarshalSSZ(buf []byte) error {
|
||||
var err error
|
||||
size := uint64(len(buf))
|
||||
if size != 18 {
|
||||
if size != 25 {
|
||||
return ssz.ErrSize
|
||||
}
|
||||
|
||||
@@ -611,17 +607,14 @@ func (m *MetaDataV2) UnmarshalSSZ(buf []byte) error {
|
||||
m.Syncnets = append(m.Syncnets, buf[16:17]...)
|
||||
|
||||
// Field (3) 'CustodySubnetCount'
|
||||
if cap(m.CustodySubnetCount) == 0 {
|
||||
m.CustodySubnetCount = make([]byte, 0, len(buf[17:18]))
|
||||
}
|
||||
m.CustodySubnetCount = append(m.CustodySubnetCount, buf[17:18]...)
|
||||
m.CustodySubnetCount = ssz.UnmarshallUint64(buf[17:25])
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV2 object
|
||||
func (m *MetaDataV2) SizeSSZ() (size int) {
|
||||
size = 18
|
||||
size = 25
|
||||
return
|
||||
}
|
||||
|
||||
@@ -652,11 +645,7 @@ func (m *MetaDataV2) HashTreeRootWith(hh *ssz.Hasher) (err error) {
|
||||
hh.PutBytes(m.Syncnets)
|
||||
|
||||
// Field (3) 'CustodySubnetCount'
|
||||
if size := len(m.CustodySubnetCount); size != 1 {
|
||||
err = ssz.ErrBytesLengthFn("--.CustodySubnetCount", size, 1)
|
||||
return
|
||||
}
|
||||
hh.PutBytes(m.CustodySubnetCount)
|
||||
hh.PutUint64(m.CustodySubnetCount)
|
||||
|
||||
hh.Merkleize(indx)
|
||||
return
|
||||
|
||||
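With custody_subnet_count now a uint64, MetaDataV2 is a fixed 25-byte SSZ container: 8 (seq_number) + 8 (attnets) + 1 (syncnets) + 8 (custody_subnet_count), which is why SizeSSZ and the UnmarshalSSZ length check move from 18 to 25. A hedged round-trip sketch using the generated fastssz methods (import paths as in this diff):

```go
// Round-trips a MetaDataV2 and checks the new fixed encoding size.
package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	md := &eth.MetaDataV2{
		SeqNumber:          7,
		Attnets:            bitfield.NewBitvector64(), // 8 bytes
		Syncnets:           bitfield.NewBitvector4(),  // 1 byte
		CustodySubnetCount: 4,                         // now a plain uint64
	}

	enc, err := md.MarshalSSZ()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(enc)) // 25 = 8 + 8 + 1 + 8

	decoded := &eth.MetaDataV2{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		panic(err)
	}
	fmt.Println(decoded.CustodySubnetCount) // 4
}
```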
proto/prysm/v1alpha1/p2p_messages.pb.go (generated, 77 lines changed)
@@ -356,7 +356,7 @@ type MetaDataV2 struct {
|
||||
SeqNumber uint64 `protobuf:"varint,1,opt,name=seq_number,json=seqNumber,proto3" json:"seq_number,omitempty"`
|
||||
Attnets github_com_prysmaticlabs_go_bitfield.Bitvector64 `protobuf:"bytes,2,opt,name=attnets,proto3" json:"attnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector64" ssz-size:"8"`
|
||||
Syncnets github_com_prysmaticlabs_go_bitfield.Bitvector4 `protobuf:"bytes,3,opt,name=syncnets,proto3" json:"syncnets,omitempty" cast-type:"github.com/prysmaticlabs/go-bitfield.Bitvector4" ssz-size:"1"`
|
||||
CustodySubnetCount []byte `protobuf:"bytes,4,opt,name=custody_subnet_count,json=custodySubnetCount,proto3" json:"custody_subnet_count,omitempty" ssz-size:"1"`
|
||||
CustodySubnetCount uint64 `protobuf:"varint,4,opt,name=custody_subnet_count,json=custodySubnetCount,proto3" json:"custody_subnet_count,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MetaDataV2) Reset() {
|
||||
@@ -412,11 +412,11 @@ func (x *MetaDataV2) GetSyncnets() github_com_prysmaticlabs_go_bitfield.Bitvecto
|
||||
return github_com_prysmaticlabs_go_bitfield.Bitvector4(nil)
|
||||
}
|
||||
|
||||
func (x *MetaDataV2) GetCustodySubnetCount() []byte {
|
||||
func (x *MetaDataV2) GetCustodySubnetCount() uint64 {
|
||||
if x != nil {
|
||||
return x.CustodySubnetCount
|
||||
}
|
||||
return nil
|
||||
return 0
|
||||
}
|
||||
|
||||
type BlobSidecarsByRangeRequest struct {
|
||||
@@ -616,7 +616,7 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
|
||||
0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66,
|
||||
0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69, 0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a,
|
||||
0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x8f,
|
||||
0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x22, 0x88,
|
||||
0x02, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x56, 0x32, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x09, 0x73, 0x65, 0x71, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x53, 0x0a, 0x07,
|
||||
@@ -630,43 +630,42 @@ var file_proto_prysm_v1alpha1_p2p_messages_proto_rawDesc = []byte{
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62,
|
||||
0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x62, 0x69, 0x74, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x42, 0x69,
|
||||
0x74, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x34, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x08, 0x73,
|
||||
0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x63, 0x75, 0x73, 0x74, 0x6f,
|
||||
0x79, 0x6e, 0x63, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x75, 0x73, 0x74, 0x6f,
|
||||
0x64, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x12, 0x63, 0x75,
|
||||
0x73, 0x74, 0x6f, 0x64, 0x79, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74,
|
||||
0x22, 0x98, 0x01, 0x0a, 0x1a, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72,
|
||||
0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62,
|
||||
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
|
||||
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72,
|
||||
0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc1, 0x01, 0x0a, 0x20,
|
||||
0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61,
|
||||
0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61,
|
||||
0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x07,
|
||||
0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92,
|
||||
0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42,
|
||||
0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50,
|
||||
0x32, 0x50, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
|
||||
0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72,
|
||||
0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x64, 0x79, 0x53, 0x75,
|
||||
0x62, 0x6e, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x98, 0x01, 0x0a, 0x1a, 0x42, 0x6c,
|
||||
0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e, 0x67,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72,
|
||||
0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5,
|
||||
0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79,
|
||||
0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
|
||||
0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53,
|
||||
0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc1, 0x01, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c,
|
||||
0x75, 0x6d, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x42, 0x79, 0x52, 0x61, 0x6e,
|
||||
0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x64, 0x0a, 0x0a, 0x73, 0x74, 0x61,
|
||||
0x72, 0x74, 0x5f, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82,
|
||||
0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
|
||||
0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
|
||||
0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15,
|
||||
0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
|
||||
0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
|
||||
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
|
||||
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x6c, 0x6f, 0x74, 0x12,
|
||||
0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
|
||||
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73,
|
||||
0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x07, 0x92, 0xb5, 0x18, 0x03, 0x31, 0x32, 0x38, 0x52,
|
||||
0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63,
|
||||
0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
|
||||
0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02,
|
||||
0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -74,7 +74,7 @@ message MetaDataV2 {
|
||||
uint64 seq_number = 1;
|
||||
bytes attnets = 2 [(ethereum.eth.ext.ssz_size) = "8", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector64"];
|
||||
bytes syncnets = 3 [(ethereum.eth.ext.ssz_size) = "1", (ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/go-bitfield.Bitvector4"];
|
||||
bytes custody_subnet_count = 4 [(ethereum.eth.ext.ssz_size) = "1"];
|
||||
uint64 custody_subnet_count = 4;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -10,13 +10,18 @@ import (
|
||||
// DataColumnFields extracts a standard set of fields from a DataColumnSidecar into a logrus.Fields struct
|
||||
// which can be passed to log.WithFields.
|
||||
func DataColumnFields(column blocks.RODataColumn) logrus.Fields {
|
||||
kzgCommitmentsShort := make([][]byte, 0, len(column.KzgCommitments))
|
||||
for _, kzgCommitment := range column.KzgCommitments {
|
||||
kzgCommitmentsShort = append(kzgCommitmentsShort, kzgCommitment[:3])
|
||||
}
|
||||
|
||||
return logrus.Fields{
|
||||
"slot": column.Slot(),
|
||||
"proposerIndex": column.ProposerIndex(),
|
||||
"blockRoot": fmt.Sprintf("%#x", column.BlockRoot()),
|
||||
"parentRoot": fmt.Sprintf("%#x", column.ParentRoot()),
|
||||
"kzgCommitments": fmt.Sprintf("%#x", column.KzgCommitments),
|
||||
"index": column.ColumnIndex,
|
||||
"propIdx": column.ProposerIndex(),
|
||||
"blockRoot": fmt.Sprintf("%#x", column.BlockRoot())[:8],
|
||||
"parentRoot": fmt.Sprintf("%#x", column.ParentRoot())[:8],
|
||||
"kzgCommitments": fmt.Sprintf("%#x", kzgCommitmentsShort),
|
||||
"colIdx": column.ColumnIndex,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
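The reworked fields trade full hex dumps for short, scannable ones: slicing the %#x rendering of a 32-byte root to 8 characters leaves "0x" plus the first three bytes, and each 48-byte KZG commitment is trimmed to its first three bytes before formatting. A hedged standalone illustration of the truncation (values are invented):

```go
// Demonstrates the truncation applied by the new DataColumnFields output.
package main

import "fmt"

func main() {
	var root [32]byte
	root[0], root[1], root[2], root[3] = 0xde, 0xad, 0xbe, 0xef

	full := fmt.Sprintf("%#x", root) // "0xdeadbeef00...00" (66 characters)
	fmt.Println(full[:8])            // "0xdeadbe" -> "0x" + first 3 bytes

	commitment := make([]byte, 48) // a KZG commitment is 48 bytes
	commitment[0], commitment[1], commitment[2] = 0xaa, 0xbb, 0xcc
	short := [][]byte{commitment[:3]}
	fmt.Printf("%#x\n", short) // [0xaabbcc]
}
```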
@@ -69,14 +69,14 @@ const (
|
||||
var metricComparisonTests = []comparisonTest{
|
||||
{
|
||||
name: "beacon aggregate and proof",
|
||||
topic1: fmt.Sprintf(p2pFailValidationTopic, p2p.AggregateAndProofSubnetTopicFormat),
|
||||
topic2: fmt.Sprintf(p2pReceivedTotalTopic, p2p.AggregateAndProofSubnetTopicFormat),
|
||||
topic1: fmt.Sprintf(p2pFailValidationTopic, p2p.BeaconAggregateAndProofSubnetTopicFormat),
|
||||
topic2: fmt.Sprintf(p2pReceivedTotalTopic, p2p.BeaconAggregateAndProofSubnetTopicFormat),
|
||||
expectedComparison: 0.8,
|
||||
},
|
||||
{
|
||||
name: "committee index beacon attestations",
|
||||
topic1: fmt.Sprintf(p2pFailValidationTopic, formatTopic(p2p.AttestationSubnetTopicFormat)),
|
||||
topic2: fmt.Sprintf(p2pReceivedTotalTopic, formatTopic(p2p.AttestationSubnetTopicFormat)),
|
||||
topic1: fmt.Sprintf(p2pFailValidationTopic, formatTopic(p2p.BeaconAttestationSubnetTopicFormat)),
|
||||
topic2: fmt.Sprintf(p2pReceivedTotalTopic, formatTopic(p2p.BeaconAttestationSubnetTopicFormat)),
|
||||
expectedComparison: 0.15,
|
||||
},
|
||||
{
|
||||
|
||||