Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: addExtraDa...factorize- (11 commits)

| SHA1 |
|---|
| 9acc2c6601 |
| ecc8aa5829 |
| 9fa49e7bc9 |
| 1139c90ab2 |
| 79d05a87bb |
| 1707cf3ec7 |
| bdbb850250 |
| b28b1ed6ce |
| 74bb0821a8 |
| 8025a483e2 |
| 0475631543 |
@@ -28,6 +28,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Added logs when a peer is (dis)connected, including the reason for the disconnection when we initiate it.
- Added a Prometheus error counter metric for HTTP requests to track beacon node requests.
- Added a Prometheus error counter metric for SSE requests.

### Changed

@@ -63,6 +66,10 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
- Improvements to HTTP response handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14673)
- Updated `Blobs` endpoint to return additional metadata fields.
- Refactored static and dynamic subnets subscription.

### Deprecated

@@ -73,6 +80,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed KZG proof check from blob reconstructor.

### Fixed

@@ -93,6 +101,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

- Corrected nil check on some interface attestation types.
- Temporary solution to handling Electra attestation and attester_slashing events. [PR](14655)
- Various log improvements and comment additions.
- P2P: Avoid infinite loop when looking for peers in small networks.

### Security
@@ -12,6 +12,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
+       "//api/client:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types:go_default_library",

@@ -14,6 +14,7 @@ import (

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/api"
+   "github.com/prysmaticlabs/prysm/v5/api/client"
    "github.com/prysmaticlabs/prysm/v5/api/server/structs"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"

@@ -176,7 +177,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
        err = non200Err(r)
        return
    }
-   res, err = io.ReadAll(r.Body)
+   res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
    if err != nil {
        err = errors.Wrap(err, "error reading http response body from builder server")
        return

@@ -358,7 +359,7 @@ func (c *Client) Status(ctx context.Context) error {
}

func non200Err(response *http.Response) error {
-   bodyBytes, err := io.ReadAll(response.Body)
+   bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
    var errMessage ErrorMessage
    var body string
    if err != nil {
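Both call sites above cap how much of an HTTP response body is read before parsing. The snippet below is a minimal, standalone illustration of that bounding behaviour using only the standard library (not Prysm code; the 16-byte limit is arbitrary):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for an HTTP response body of unknown (possibly huge) size.
	body := strings.NewReader(strings.Repeat("x", 1024))

	// io.LimitReader stops returning data after n bytes, so io.ReadAll
	// can never allocate more than the configured cap.
	const maxBodySize = 16
	b, err := io.ReadAll(io.LimitReader(body, maxBodySize))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 16, regardless of how large the body is
}
```

Note that an over-limit body is silently truncated rather than rejected; the point here is simply to protect the caller from unbounded allocations.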
@@ -10,11 +10,17 @@ import (
    "github.com/pkg/errors"
)

+const (
+   MaxBodySize    int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
+   MaxErrBodySize int64 = 1 << 17 // 128KB
+)
+
// Client is a wrapper object around the HTTP client.
type Client struct {
-   hc      *http.Client
-   baseURL *url.URL
-   token   string
+   hc          *http.Client
+   baseURL     *url.URL
+   token       string
+   maxBodySize int64
}

// NewClient constructs a new client with the provided options (ex WithTimeout).

@@ -26,8 +32,9 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
        return nil, err
    }
    c := &Client{
-       hc:      &http.Client{},
-       baseURL: u,
+       hc:          &http.Client{},
+       baseURL:     u,
+       maxBodySize: MaxBodySize,
    }
    for _, o := range opts {
        o(c)

@@ -72,7 +79,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
    u := c.baseURL.ResolveReference(&url.URL{Path: path})
-   req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+   req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
    if err != nil {
        return nil, err
    }

@@ -89,7 +96,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
    if r.StatusCode != http.StatusOK {
        return nil, Non200Err(r)
    }
-   b, err := io.ReadAll(r.Body)
+   b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
    if err != nil {
        return nil, errors.Wrap(err, "error reading http response body")
    }

@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")

// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
-func Non200Err(response *http.Response) error {
-   bodyBytes, err := io.ReadAll(response.Body)
+func Non200Err(r *http.Response) error {
+   b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
    var body string
    if err != nil {
        body = "(Unable to read response body.)"
    } else {
-       body = "response body:\n" + string(bodyBytes)
+       body = "response body:\n" + string(b)
    }
-   msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
-   switch response.StatusCode {
+   msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
+   switch r.StatusCode {
    case http.StatusNotFound:
        return errors.Wrap(ErrNotFound, msg)
    default:

@@ -46,3 +46,10 @@ func WithAuthenticationToken(token string) ClientOpt {
        c.token = token
    }
}

+// WithMaxBodySize overrides the default max body size of 8MB.
+func WithMaxBodySize(size int64) ClientOpt {
+   return func(c *Client) {
+       c.maxBodySize = size
+   }
+}
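A hedged sketch of how a caller might combine the new `WithMaxBodySize` option with the constructor and `Get` shown above. The host URL, token, and request path are illustrative placeholders, not values taken from Prysm:

```go
package main

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
	// Allow responses up to 32MB instead of the 8MB default.
	c, err := client.NewClient(
		"http://localhost:3500", // placeholder beacon node URL
		client.WithMaxBodySize(1<<25),
		client.WithAuthenticationToken("example-token"), // placeholder token
	)
	if err != nil {
		panic(err)
	}

	// Get reads at most maxBodySize bytes of the response body.
	body, err := c.Get(context.Background(), "/eth/v1/node/version") // illustrative path
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```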
@@ -1,7 +1,10 @@
package structs

type SidecarsResponse struct {
-   Data []*Sidecar `json:"data"`
+   Version             string     `json:"version"`
+   Data                []*Sidecar `json:"data"`
+   ExecutionOptimistic bool       `json:"execution_optimistic"`
+   Finalized           bool       `json:"finalized"`
}

type Sidecar struct {
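For illustration, the struct tags above imply a response envelope with the following JSON keys. The sketch uses a stand-in element type because the `Sidecar` fields are not shown in this diff; only the top-level keys come from the code above, and the values are made up:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// sidecarsResponse mirrors only the JSON keys visible in the diff above;
// the element type is a stand-in, not Prysm's Sidecar struct.
type sidecarsResponse struct {
	Version             string            `json:"version"`
	Data                []json.RawMessage `json:"data"`
	ExecutionOptimistic bool              `json:"execution_optimistic"`
	Finalized           bool              `json:"finalized"`
}

func main() {
	resp := sidecarsResponse{
		Version:   "deneb", // illustrative value
		Data:      []json.RawMessage{json.RawMessage(`{}`)},
		Finalized: true,
	}
	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```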
@@ -623,13 +623,7 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
        continue
    }

-   // Verify the sidecar KZG proof
    v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
-   if err := v.SidecarKzgProofVerified(); err != nil {
-       log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
-       continue
-   }
-
    verifiedBlob, err := v.VerifiedROBlob()
    if err != nil {
        log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
@@ -17,7 +17,6 @@ go_library(
        "handshake.go",
        "info.go",
        "interfaces.go",
-       "iterator.go",
        "log.go",
        "message_id.go",
        "monitoring.go",

@@ -164,12 +163,10 @@ go_test(
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/testing:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -225,11 +225,11 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
    require.NoError(t, err)
    defer bootListener.Close()

    // Use shorter period for testing.
    currentPeriod := pollingPeriod
    pollingPeriod = 1 * time.Second
+   // Use smaller batch size for testing.
+   currentBatchSize := batchSize
+   batchSize = 2
    defer func() {
        pollingPeriod = currentPeriod
+       batchSize = currentBatchSize
    }()

    bootNode := bootListener.Self()

@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
    // Disallow bad peers from dialing in.
-   if s.peers.IsBad(pid) {
+   if s.peers.IsBad(pid) != nil {
        return false
    }
    return filterConnections(s.addrFilter, m)

@@ -50,7 +50,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
    }()

    for i := 0; i < highWatermarkBuffer; i++ {
-       addPeer(t, s.peers, peers.PeerConnected, false)
+       addPeer(t, s.peers, peers.Connected, false)
    }

    // create alternate host

@@ -159,7 +159,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
    inboundLimit += 1
    // Add in up to inbound peer limit.
    for i := 0; i < int(inboundLimit); i++ {
-       addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+       addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
    }
    valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
    if valid {

@@ -22,6 +22,7 @@ import (
    ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
+   "github.com/sirupsen/logrus"
)

type ListenerRebooter interface {

@@ -47,10 +48,12 @@
    udp6
)

+const quickProtocolEnrKey = "quic"
+
type quicProtocol uint16

// quicProtocol is the "quic" key, which holds the QUIC port of the node.
-func (quicProtocol) ENRKey() string { return "quic" }
+func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }

type listenerWrapper struct {
    mu sync.RWMutex
|
||||
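The `ENRKey` method above is what ties a typed value to its key in the node record. A minimal sketch of that mechanism using go-ethereum's `enr` package directly (the type name and port value are invented for illustration; this is not Prysm's wiring):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enr"
)

// quicPort plays the same role as quicProtocol above: the type's ENRKey
// decides which key the value is stored under in the record.
type quicPort uint16

func (quicPort) ENRKey() string { return "quic" }

func main() {
	var record enr.Record
	record.Set(quicPort(9000)) // arbitrary example port

	// Read it back by key, the same way the tests below load attnets/syncnets.
	var p quicPort
	if err := record.Load(enr.WithEntry("quic", &p)); err != nil {
		panic(err)
	}
	fmt.Println(p) // 9000
}
```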
@@ -133,68 +136,129 @@ func (l *listenerWrapper) RebootListener() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshENR uses an epoch to refresh the enr entry for our node
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
// return early if discv5 isn't running
|
||||
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
|
||||
// This routine verifies and updates our attestation and sync committee subnets if they have been rotated.
|
||||
func (s *Service) RefreshPersistentSubnets() {
|
||||
// Return early if discv5 service isn't running.
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
}
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
if err := initializePersistentSubnets(s.dv5Listener.LocalNode().ID(), currEpoch); err != nil {
|
||||
|
||||
// Get the current epoch.
|
||||
currentSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Get our node ID.
|
||||
nodeID := s.dv5Listener.LocalNode().ID()
|
||||
|
||||
// Get our node record.
|
||||
record := s.dv5Listener.Self().Record()
|
||||
|
||||
// Get the version of our metadata.
|
||||
metadataVersion := s.Metadata().Version()
|
||||
|
||||
// Initialize persistent subnets.
|
||||
if err := initializePersistentSubnets(nodeID, currentEpoch); err != nil {
|
||||
log.WithError(err).Error("Could not initialize persistent subnets")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the current attestation subnet bitfield.
|
||||
bitV := bitfield.NewBitvector64()
|
||||
committees := cache.SubnetIDs.GetAllSubnets()
|
||||
for _, idx := range committees {
|
||||
attestationCommittees := cache.SubnetIDs.GetAllSubnets()
|
||||
for _, idx := range attestationCommittees {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
|
||||
|
||||
// Get the attestation subnet bitfield we store in our record.
|
||||
inRecordBitV, err := attBitvector(record)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve att bitfield")
|
||||
return
|
||||
}
|
||||
|
||||
// Compare current epoch with our fork epochs
|
||||
// Get the attestation subnet bitfield in our metadata.
|
||||
inMetadataBitV := s.Metadata().AttnetsBitfield()
|
||||
|
||||
// Is our attestation bitvector record up to date?
|
||||
isBitVUpToDate := bytes.Equal(bitV, inRecordBitV) && bytes.Equal(bitV, inMetadataBitV)
|
||||
|
||||
// Compare current epoch with Altair fork epoch
|
||||
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
|
||||
switch {
|
||||
case currEpoch < altairForkEpoch:
|
||||
|
||||
if currentEpoch < altairForkEpoch {
|
||||
// Phase 0 behaviour.
|
||||
if bytes.Equal(bitV, currentBitV) {
|
||||
// return early if bitfield hasn't changed
|
||||
if isBitVUpToDate {
|
||||
// Return early if bitfield hasn't changed.
|
||||
return
|
||||
}
|
||||
|
||||
// Some data changed. Update the record and the metadata.
|
||||
s.updateSubnetRecordWithMetadata(bitV)
|
||||
default:
|
||||
// Retrieve sync subnets from application level
|
||||
// cache.
|
||||
bitS := bitfield.Bitvector4{byte(0x00)}
|
||||
committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
|
||||
for _, idx := range committees {
|
||||
bitS.SetBitAt(idx, true)
|
||||
}
|
||||
currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve sync bitfield")
|
||||
return
|
||||
}
|
||||
if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
|
||||
s.Metadata().Version() == version.Altair {
|
||||
// return early if bitfields haven't changed
|
||||
return
|
||||
}
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
|
||||
|
||||
// Ping all peers.
|
||||
s.pingPeersAndLogEnr()
|
||||
|
||||
return
|
||||
}
|
||||
// ping all peers to inform them of new metadata
|
||||
s.pingPeers()
|
||||
|
||||
// Get the current sync subnet bitfield.
|
||||
bitS := bitfield.Bitvector4{byte(0x00)}
|
||||
syncCommittees := cache.SyncSubnetIDs.GetAllSubnets(currentEpoch)
|
||||
for _, idx := range syncCommittees {
|
||||
bitS.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Get the sync subnet bitfield we store in our record.
|
||||
inRecordBitS, err := syncBitvector(record)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve sync bitfield")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the sync subnet bitfield in our metadata.
|
||||
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
|
||||
|
||||
// Is our sync bitvector record up to date?
|
||||
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
|
||||
|
||||
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
|
||||
// Nothing to do, return early.
|
||||
return
|
||||
}
|
||||
|
||||
// Some data have changed, update our record and metadata.
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
|
||||
|
||||
// Ping all peers to inform them of new metadata
|
||||
s.pingPeersAndLogEnr()
|
||||
}
|
||||
|
||||
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
|
||||
func (s *Service) listenForNewNodes() {
|
||||
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
const (
|
||||
minLogInterval = 1 * time.Minute
|
||||
thresholdLimit = 5
|
||||
)
|
||||
|
||||
peersSummary := func(threshold uint) (uint, uint) {
|
||||
// Retrieve how many active peers we have.
|
||||
activePeers := s.Peers().Active()
|
||||
activePeerCount := uint(len(activePeers))
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
if activePeerCount >= threshold {
|
||||
return activePeerCount, 0
|
||||
}
|
||||
|
||||
missingPeerCount := threshold - activePeerCount
|
||||
|
||||
return activePeerCount, missingPeerCount
|
||||
}
|
||||
|
||||
var lastLogTime time.Time
|
||||
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
defer iterator.Close()
|
||||
connectivityTicker := time.NewTicker(1 * time.Minute)
|
||||
thresholdCount := 0
|
||||
@@ -203,25 +267,31 @@ func (s *Service) listenForNewNodes() {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
|
||||
case <-connectivityTicker.C:
|
||||
// Skip the connectivity check if not enabled.
|
||||
if !features.Get().EnableDiscoveryReboot {
|
||||
continue
|
||||
}
|
||||
|
||||
if !s.isBelowOutboundPeerThreshold() {
|
||||
// Reset counter if we are beyond the threshold
|
||||
thresholdCount = 0
|
||||
continue
|
||||
}
|
||||
|
||||
thresholdCount++
|
||||
|
||||
// Reboot listener if connectivity drops
|
||||
if thresholdCount > 5 {
|
||||
log.WithField("outboundConnectionCount", len(s.peers.OutboundConnected())).Warn("Rebooting discovery listener, reached threshold.")
|
||||
if thresholdCount > thresholdLimit {
|
||||
outBoundConnectedCount := len(s.peers.OutboundConnected())
|
||||
log.WithField("outboundConnectionCount", outBoundConnectedCount).Warn("Rebooting discovery listener, reached threshold.")
|
||||
if err := s.dv5Listener.RebootListener(); err != nil {
|
||||
log.WithError(err).Error("Could not reboot listener")
|
||||
continue
|
||||
}
|
||||
iterator = filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
|
||||
iterator = s.dv5Listener.RandomNodes()
|
||||
thresholdCount = 0
|
||||
}
|
||||
default:
|
||||
@@ -232,17 +302,35 @@ func (s *Service) listenForNewNodes() {
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
wantedCount := s.wantedPeerDials()
|
||||
if wantedCount == 0 {
|
||||
|
||||
// Compute the number of new peers we want to dial.
|
||||
activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)
|
||||
|
||||
fields := logrus.Fields{
|
||||
"currentPeerCount": activePeerCount,
|
||||
"targetPeerCount": s.cfg.MaxPeers,
|
||||
}
|
||||
|
||||
if missingPeerCount == 0 {
|
||||
log.Trace("Not looking for peers, at peer limit")
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
|
||||
if time.Since(lastLogTime) > minLogInterval {
|
||||
lastLogTime = time.Now()
|
||||
log.WithFields(fields).Debug("Searching for new active peers")
|
||||
}
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
|
||||
maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
|
||||
missingPeerCount = min(missingPeerCount, maxConcurrentDials)
|
||||
}
|
||||
wantedNodes := enode.ReadNodes(iterator, wantedCount)
|
||||
|
||||
// Search for new peers.
|
||||
wantedNodes := searchForPeers(iterator, batchSize, missingPeerCount, s.filterPeer)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 0; i < len(wantedNodes); i++ {
|
||||
node := wantedNodes[i]
|
||||
@@ -452,12 +540,14 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
}
|
||||
|
||||
// Ignore bad nodes.
|
||||
if s.peers.IsBad(peerData.ID) {
|
||||
if s.peers.IsBad(peerData.ID) != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Ignore nodes that are already active.
|
||||
if s.peers.IsActive(peerData.ID) {
|
||||
// Constantly update enr for known peers
|
||||
s.peers.UpdateENR(node.Record(), peerData.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -526,17 +616,6 @@ func (s *Service) isBelowOutboundPeerThreshold() bool {
|
||||
return outBoundCount < outBoundThreshold
|
||||
}
|
||||
|
||||
func (s *Service) wantedPeerDials() int {
|
||||
maxPeers := int(s.cfg.MaxPeers)
|
||||
|
||||
activePeers := len(s.Peers().Active())
|
||||
wantedCount := 0
|
||||
if maxPeers > activePeers {
|
||||
wantedCount = maxPeers - activePeers
|
||||
}
|
||||
return wantedCount
|
||||
}
|
||||
|
||||
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
|
||||
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
|
||||
var allAddrs []ma.Multiaddr
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -30,13 +32,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -131,6 +132,10 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCreateLocalNode(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.Eip7594ForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
testCases := []struct {
|
||||
name string
|
||||
cfg *Config
|
||||
@@ -378,14 +383,14 @@ func TestInboundPeerLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 0; i < 30; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
|
||||
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
|
||||
|
||||
for i := 0; i < highWatermarkBuffer; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
|
||||
@@ -404,13 +409,13 @@ func TestOutboundPeerThreshold(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isBelowOutboundPeerThreshold(), "not at outbound peer threshold")
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
}
|
||||
|
||||
require.Equal(t, false, s.isBelowOutboundPeerThreshold(), "still at outbound peer threshold")
|
||||
@@ -477,7 +482,7 @@ func TestCorrectUDPVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
// addPeer is a helper to add a peer with a given connection state)
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState, outbound bool) peer.ID {
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outbound bool) peer.ID {
|
||||
// Set up some peers with different states
|
||||
mhBytes := []byte{0x11, 0x04}
|
||||
idBytes := make([]byte, 4)
|
||||
@@ -499,192 +504,270 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState,
|
||||
return id
|
||||
}
|
||||
|
||||
func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
func createAndConnectPeer(t *testing.T, p2pService *testp2p.TestP2P, offset int) {
|
||||
// Create the private key.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(offset + i)
|
||||
}
|
||||
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peer.
|
||||
peer := testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
|
||||
// Add the peer and connect it.
|
||||
p2pService.Peers().Add(&enr.Record{}, peer.PeerID(), nil, network.DirOutbound)
|
||||
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
|
||||
p2pService.Connect(peer)
|
||||
}
|
||||
|
||||
// Define the ping count.
|
||||
var actualPingCount int
|
||||
|
||||
type check struct {
|
||||
pingCount int
|
||||
metadataSequenceNumber uint64
|
||||
attestationSubnets []uint64
|
||||
syncSubnets []uint64
|
||||
custodySubnetCount *uint64
|
||||
}
|
||||
|
||||
func checkPingCountCacheMetadataRecord(
|
||||
t *testing.T,
|
||||
service *Service,
|
||||
expected check,
|
||||
) {
|
||||
// Check the ping count.
|
||||
require.Equal(t, expected.pingCount, actualPingCount)
|
||||
|
||||
// Check the attestation subnets in the cache.
|
||||
actualAttestationSubnets := cache.SubnetIDs.GetAllSubnets()
|
||||
require.DeepSSZEqual(t, expected.attestationSubnets, actualAttestationSubnets)
|
||||
|
||||
// Check the metadata sequence number.
|
||||
actualMetadataSequenceNumber := service.metaData.SequenceNumber()
|
||||
require.Equal(t, expected.metadataSequenceNumber, actualMetadataSequenceNumber)
|
||||
|
||||
// Compute expected attestation subnets bits.
|
||||
expectedBitV := bitfield.NewBitvector64()
|
||||
exists := false
|
||||
|
||||
for _, idx := range expected.attestationSubnets {
|
||||
exists = true
|
||||
expectedBitV.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Check attnets in ENR.
|
||||
var actualBitVENR bitfield.Bitvector64
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(attSubnetEnrKey, &actualBitVENR))
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBitV, actualBitVENR)
|
||||
|
||||
// Check attnets in metadata.
|
||||
if !exists {
|
||||
expectedBitV = nil
|
||||
}
|
||||
|
||||
actualBitVMetadata := service.metaData.AttnetsBitfield()
|
||||
require.DeepSSZEqual(t, expectedBitV, actualBitVMetadata)
|
||||
|
||||
if expected.syncSubnets != nil {
|
||||
// Compute expected sync subnets bits.
|
||||
expectedBitS := bitfield.NewBitvector4()
|
||||
exists = false
|
||||
|
||||
for _, idx := range expected.syncSubnets {
|
||||
exists = true
|
||||
expectedBitS.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Check syncnets in ENR.
|
||||
var actualBitSENR bitfield.Bitvector4
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, &actualBitSENR))
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBitS, actualBitSENR)
|
||||
|
||||
// Check syncnets in metadata.
|
||||
if !exists {
|
||||
expectedBitS = nil
|
||||
}
|
||||
|
||||
actualBitSMetadata := service.metaData.SyncnetsBitfield()
|
||||
require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
// Clean up caches after usage.
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
svcBuilder func(t *testing.T) *Service
|
||||
postValidation func(t *testing.T, s *Service)
|
||||
const (
|
||||
altairForkEpoch = 5
|
||||
eip7594ForkEpoch = 10
|
||||
)
|
||||
|
||||
// Set up epochs.
|
||||
defaultCfg := params.BeaconConfig()
|
||||
cfg := defaultCfg.Copy()
|
||||
cfg.AltairForkEpoch = altairForkEpoch
|
||||
cfg.Eip7594ForkEpoch = eip7594ForkEpoch
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the number of seconds per epoch.
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
secondsPerEpoch := secondsPerSlot * uint64(slotsPerEpoch)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
epochSinceGenesis uint64
|
||||
checks []check
|
||||
}{
|
||||
{
|
||||
name: "metadata no change",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, idx := range subs {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
|
||||
name: "Phase0",
|
||||
epochSinceGenesis: 0,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch with no bitfield",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, idx := range subs {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated past fork epoch with bitfields",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-6 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
name: "Altair",
|
||||
epochSinceGenesis: altairForkEpoch,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcBuilder(t)
|
||||
s.RefreshENR()
|
||||
tt.postValidation(t, s)
|
||||
s.dv5Listener.Close()
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const peerOffset = 1
|
||||
|
||||
// Initialize the ping count.
|
||||
actualPingCount = 0
|
||||
|
||||
// Create the private key.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(i)
|
||||
}
|
||||
|
||||
unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a p2p service.
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create and connect a peer.
|
||||
createAndConnectPeer(t, p2p, peerOffset)
|
||||
|
||||
// Create a service.
|
||||
service := &Service{
|
||||
pingMethod: func(_ context.Context, _ peer.ID) error {
|
||||
actualPingCount++
|
||||
return nil
|
||||
},
|
||||
cfg: &Config{UDPPort: 2000},
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
}
|
||||
|
||||
// Set the listener and the metadata.
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return service.createListener(nil, privateKey)
|
||||
}
|
||||
|
||||
listener, err := newListener(createListener)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.dv5Listener = listener
|
||||
service.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[0])
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[1])
|
||||
|
||||
// Add a sync committee subnet.
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'a'}, altairForkEpoch, []uint64{1, 2}, 1*time.Hour)
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[2])
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[3])
|
||||
|
||||
// Clean the test.
|
||||
service.dv5Listener.Close()
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
})
|
||||
}
|
||||
|
||||
// Reset the config.
|
||||
params.OverrideBeaconConfig(defaultCfg)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package p2p

import (
    "context"
-   "errors"
    "fmt"
    "io"
    "sync"

@@ -10,6 +9,7 @@ import (

    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
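Dropping the standard `errors` import in favour of `github.com/pkg/errors` lets this file keep using `errors.Is`/`errors.New` while also gaining `errors.Wrap` for annotated causes, as the disconnect paths below do. A small hedged sketch of that combination (the sentinel and wrap message are made up, not Prysm's):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNoPeerStatus = errors.New("no chain status for peer") // illustrative sentinel

func main() {
	// Wrap adds context while preserving the original cause...
	err := errors.Wrap(errNoPeerStatus, "chain state")
	fmt.Println(err) // chain state: no chain status for peer

	// ...so errors.Is still matches the sentinel through the wrapper.
	fmt.Println(errors.Is(err, errNoPeerStatus)) // true
}
```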
@@ -25,6 +25,46 @@ func peerMultiaddrString(conn network.Conn) string {
|
||||
return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
|
||||
}
|
||||
|
||||
func (s *Service) connectToPeer(conn network.Conn) {
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
|
||||
// Go through the handshake process.
|
||||
log.WithFields(logrus.Fields{
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
}).Debug("Initiate peer connection")
|
||||
}
|
||||
|
||||
func (s *Service) disconnectFromPeerOnError(
|
||||
conn network.Conn,
|
||||
goodByeFunc func(ctx context.Context, id peer.ID) error,
|
||||
badPeerErr error,
|
||||
) {
|
||||
// Get the remote peer ID.
|
||||
remotePeerID := conn.RemotePeer()
|
||||
|
||||
// Set the peer to disconnecting state.
|
||||
s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
|
||||
|
||||
// Only attempt a goodbye if we are still connected to the peer.
|
||||
if s.host.Network().Connectedness(remotePeerID) == network.Connected {
|
||||
if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
|
||||
log.WithError(err).Error("Unable to disconnect from peer")
|
||||
}
|
||||
}
|
||||
|
||||
log.
|
||||
WithError(badPeerErr).
|
||||
WithFields(logrus.Fields{
|
||||
"multiaddr": peerMultiaddrString(conn),
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
"remainingActivePeers": len(s.peers.Active()),
|
||||
}).
|
||||
Debug("Initiate peer disconnection")
|
||||
|
||||
s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
|
||||
}
|
||||
|
||||
// AddConnectionHandler adds a callback function which handles the connection with a
|
||||
// newly added peer. It performs a handshake with that peer by sending a hello request
|
||||
// and validating the response from the peer.
|
||||
@@ -57,18 +97,9 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
|
||||
}
|
||||
|
||||
s.host.Network().Notify(&network.NotifyBundle{
|
||||
ConnectedF: func(net network.Network, conn network.Conn) {
|
||||
ConnectedF: func(_ network.Network, conn network.Conn) {
|
||||
remotePeer := conn.RemotePeer()
|
||||
disconnectFromPeer := func() {
|
||||
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
|
||||
// Only attempt a goodbye if we are still connected to the peer.
|
||||
if s.host.Network().Connectedness(remotePeer) == network.Connected {
|
||||
if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
|
||||
log.WithError(err).Error("Unable to disconnect from peer")
|
||||
}
|
||||
}
|
||||
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
|
||||
}
|
||||
|
||||
// Connection handler must be non-blocking as part of libp2p design.
|
||||
go func() {
|
||||
if peerHandshaking(remotePeer) {
|
||||
@@ -77,28 +108,21 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
|
||||
return
|
||||
}
|
||||
defer peerFinished(remotePeer)
|
||||
|
||||
// Handle the various pre-existing conditions that will result in us not handshaking.
|
||||
peerConnectionState, err := s.peers.ConnectionState(remotePeer)
|
||||
if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
|
||||
if err == nil && (peerConnectionState == peers.Connected || peerConnectionState == peers.Connecting) {
|
||||
log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
|
||||
return
|
||||
}
|
||||
|
||||
s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)
|
||||
|
||||
// Defensive check in the event we still get a bad peer.
|
||||
if s.peers.IsBad(remotePeer) {
|
||||
log.WithField("reason", "bad peer").Trace("Ignoring connection request")
|
||||
disconnectFromPeer()
|
||||
if err := s.peers.IsBad(remotePeer); err != nil {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
|
||||
return
|
||||
}
|
||||
validPeerConnection := func() {
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
|
||||
// Go through the handshake process.
|
||||
log.WithFields(logrus.Fields{
|
||||
"direction": conn.Stat().Direction,
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
}).Debug("Peer connected")
|
||||
}
|
||||
|
||||
// Do not perform handshake on inbound dials.
|
||||
if conn.Stat().Direction == network.DirInbound {
|
||||
@@ -117,63 +141,80 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
|
||||
// If peer hasn't sent a status request, we disconnect with them
|
||||
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
|
||||
statusMessageMissing.Inc()
|
||||
disconnectFromPeer()
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
|
||||
return
|
||||
}
|
||||
|
||||
if peerExists {
|
||||
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
|
||||
if err != nil {
|
||||
disconnectFromPeer()
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
|
||||
return
|
||||
}
|
||||
// exit if we don't receive any current status messages from
|
||||
// peer.
|
||||
if updated.IsZero() || !updated.After(currentTime) {
|
||||
disconnectFromPeer()
|
||||
|
||||
// Exit if we don't receive any current status messages from peer.
|
||||
if updated.IsZero() {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
|
||||
return
|
||||
}
|
||||
|
||||
if updated.Before(currentTime) {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
|
||||
return
|
||||
}
|
||||
}
|
||||
validPeerConnection()
|
||||
|
||||
s.connectToPeer(conn)
|
||||
return
|
||||
}
|
||||
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
|
||||
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
|
||||
log.WithError(err).Trace("Handshake failed")
|
||||
disconnectFromPeer()
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
|
||||
return
|
||||
}
|
||||
validPeerConnection()
|
||||
|
||||
s.connectToPeer(conn)
|
||||
}()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
|
||||
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
|
||||
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
|
||||
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
|
||||
s.host.Network().Notify(&network.NotifyBundle{
|
||||
DisconnectedF: func(net network.Network, conn network.Conn) {
|
||||
log := log.WithField("multiAddr", peerMultiaddrString(conn))
|
||||
peerID := conn.RemotePeer()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
})
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
// Exit early if we are still connected to the peer.
|
||||
if net.Connectedness(conn.RemotePeer()) == network.Connected {
|
||||
if net.Connectedness(peerID) == network.Connected {
|
||||
return
|
||||
}
|
||||
priorState, err := s.peers.ConnectionState(conn.RemotePeer())
|
||||
|
||||
priorState, err := s.peers.ConnectionState(peerID)
|
||||
if err != nil {
|
||||
// Can happen if the peer has already disconnected, so...
|
||||
priorState = peers.PeerDisconnected
|
||||
priorState = peers.Disconnected
|
||||
}
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
|
||||
|
||||
s.peers.SetConnectionState(peerID, peers.Disconnecting)
|
||||
if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
|
||||
log.WithError(err).Error("Disconnect handler failed")
|
||||
}
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
|
||||
|
||||
s.peers.SetConnectionState(peerID, peers.Disconnected)
|
||||
|
||||
// Only log disconnections if we were fully connected.
|
||||
if priorState == peers.PeerConnected {
|
||||
log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
|
||||
if priorState == peers.Connected {
|
||||
activePeersCount := len(s.peers.Active())
|
||||
log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
|
||||
}
|
||||
}()
|
||||
},
|
||||
|
||||
@@ -82,7 +82,7 @@ type PeerManager interface {
    Host() host.Host
    ENR() *enr.Record
    DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
-   RefreshENR()
+   RefreshPersistentSubnets()
    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
    AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

@@ -1,36 +0,0 @@
package p2p

import (
    "context"

    "github.com/ethereum/go-ethereum/p2p/enode"
)

// filterNodes wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true. This custom implementation also
// checks for context deadlines so that in the event the parent context has
// expired, we do exit from the search rather than perform more network
// lookups for additional peers.
func filterNodes(ctx context.Context, it enode.Iterator, check func(*enode.Node) bool) enode.Iterator {
    return &filterIter{ctx, it, check}
}

type filterIter struct {
    context.Context
    enode.Iterator
    check func(*enode.Node) bool
}

// Next looks up for the next valid node according to our
// filter criteria.
func (f *filterIter) Next() bool {
    for f.Iterator.Next() {
        if f.Context.Err() != nil {
            return false
        }
        if f.check(f.Node()) {
            return true
        }
    }
    return false
}

@@ -23,8 +23,8 @@ var (
    ErrNoPeerStatus = errors.New("no chain status for peer")
)

-// PeerConnectionState is the state of the connection.
-type PeerConnectionState ethpb.ConnectionState
+// ConnectionState is the state of the connection.
+type ConnectionState ethpb.ConnectionState

// StoreConfig holds peer store parameters.
type StoreConfig struct {

@@ -49,7 +49,7 @@ type PeerData struct {
    // Network related data.
    Address   ma.Multiaddr
    Direction network.Direction
-   ConnState PeerConnectionState
+   ConnState ConnectionState
    Enr           *enr.Record
    NextValidTime time.Time
    // Chain related data.

@@ -20,6 +20,7 @@ go_library(
        "//crypto/rand:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
+       "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -4,6 +4,7 @@ import (
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
)

@@ -61,7 +62,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
-   if s.isBadPeerNoLock(pid) {
+   if s.isBadPeerNoLock(pid) != nil {
        return BadPeerScore
    }
    score := float64(0)

@@ -116,18 +117,24 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {

// IsBadPeer states if the peer is to be considered bad.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
+func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
    s.store.RLock()
    defer s.store.RUnlock()

    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
    if peerData, ok := s.store.PeerData(pid); ok {
-       return peerData.BadResponses >= s.config.Threshold
+       if peerData.BadResponses >= s.config.Threshold {
+           return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
+       }
+
+       return nil
    }
-   return false
+
+   return nil
}

@@ -137,7 +144,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) {
+       if s.isBadPeerNoLock(pid) != nil {
            badPeers = append(badPeers, pid)
        }
    }
||||
|
||||
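The `IsBadPeer`/`isBadPeerNoLock` change above is the general pattern used across this changeset: checks that used to return a bare bool now return nil or a descriptive error, so callers can log why a peer was rejected. A stripped-down sketch of that call-site shape (the type, threshold, and string peer IDs are invented for illustration; the real code uses `peer.ID` and the scorer store):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// badResponseChecker is an illustrative stand-in for a scorer.
type badResponseChecker struct {
	badResponses map[string]int // keyed by a peer ID string for simplicity
	threshold    int
}

// IsBadPeer returns nil for acceptable peers and a descriptive error otherwise.
func (c *badResponseChecker) IsBadPeer(pid string) error {
	if got := c.badResponses[pid]; got >= c.threshold {
		return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", got, c.threshold)
	}
	return nil
}

func main() {
	c := &badResponseChecker{badResponses: map[string]int{"peerA": 7}, threshold: 6}

	// Call sites switch from `if IsBadPeer(pid) {` to an error check,
	// and the reason travels with the error.
	if err := c.IsBadPeer("peerA"); err != nil {
		fmt.Println("rejecting peer:", err)
	}
}
```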
@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))

scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))

scorer.Increment(pid)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))

scorer.Increment(pid)
assert.Equal(t, true, scorer.IsBadPeer(pid))
assert.NotNil(t, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
}

@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pid := peer.ID("peer1")
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))

peerStatuses.Add(nil, pid, nil, network.DirUnknown)
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.NoError(t, scorer.IsBadPeer(pid))

for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pid)
if i == scorers.DefaultBadResponsesThreshold-1 {
assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
} else {
assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
}
}
}
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
scorer.Increment(pids[2])
scorer.Increment(pids[4])
}
assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
want := []peer.ID{pids[1], pids[2], pids[4]}
badPeers := scorer.BadPeers()
sort.Slice(badPeers, func(i, j int) bool {

@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
return nil
}

// BadPeers returns the peers that are considered bad.

@@ -119,7 +119,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
@@ -224,7 +224,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
}{
{
name: "no peers",
update: func(s *scorers.BlockProviderScorer) {},
update: func(*scorers.BlockProviderScorer) {},
have: []peer.ID{},
want: []peer.ID{},
},
@@ -451,7 +451,7 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
})
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
if tt.update != nil {
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
})
scorer := peerStatuses.Scorers().BlockProviderScorer()

assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
scorer.IncrementProcessedBlocks("peer1", 64)
assert.Equal(t, false, scorer.IsBadPeer("peer1"))
assert.NoError(t, scorer.IsBadPeer("peer1"))
assert.Equal(t, 0, len(scorer.BadPeers()))
}

@@ -2,6 +2,7 @@ package scorers

import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -51,19 +52,24 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
return nil
}
return peerData.GossipScore < gossipThreshold

if peerData.GossipScore < gossipThreshold {
return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
}

return nil
}

// BadPeers returns the peers that are considered bad.
@@ -73,7 +79,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}

@@ -21,7 +21,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
}{
{
name: "nonexistent peer",
update: func(scorer *scorers.GossipScorer) {
update: func(*scorers.GossipScorer) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
assert.NotNil(t, scorer.IsBadPeer("peer1"), "Unexpected good peer")
},
},
{
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -53,7 +53,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})

@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
return BadPeerScore
}
score := float64(0)
@@ -67,30 +67,34 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()

return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) error {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
return nil
}

// Mark peer as bad, if the latest error is one of the terminal ones.
terminalErrs := []error{
p2ptypes.ErrWrongForkDigestVersion,
p2ptypes.ErrInvalidFinalizedRoot,
p2ptypes.ErrInvalidRequest,
}

for _, err := range terminalErrs {
if errors.Is(peerData.ChainStateValidationError, err) {
return true
return err
}
}
return false

return nil
}

// BadPeers returns the peers that are considered bad.
@@ -100,7 +104,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {

badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) {
if s.isBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}

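Because the peer-status scorer now returns the terminal validation error itself rather than a bare bool, a caller can branch on the specific failure with errors.Is. A hedged, self-contained sketch of that pattern (errWrongFork is a local placeholder standing in for p2ptypes.ErrWrongForkDigestVersion; it is not Prysm code):

    package main

    import (
        "errors"
        "fmt"
    )

    // errWrongFork stands in for p2ptypes.ErrWrongForkDigestVersion from the diff above,
    // so this sketch compiles on its own.
    var errWrongFork = errors.New("wrong fork digest version")

    // classify shows how a caller can branch on the terminal error that
    // isBadPeerNoLock now returns instead of true/false.
    func classify(isBad func() error) string {
        err := isBad()
        switch {
        case err == nil:
            return "peer is fine"
        case errors.Is(err, errWrongFork):
            return "peer is on another fork; do not redial"
        default:
            return fmt.Sprintf("peer is bad: %v", err)
        }
    }
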
@@ -122,7 +122,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Run(tt.name, func(*testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
ScorerParams: &scorers.Config{},
})
pid := peer.ID("peer1")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}

func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
pid1 := peer.ID("peer1")
pid2 := peer.ID("peer2")
pid3 := peer.ID("peer3")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

@@ -6,6 +6,7 @@ import (
"time"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v5/config/features"
)
@@ -24,7 +25,7 @@ const BadPeerScore = gossipThreshold
// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
Score(pid peer.ID) float64
IsBadPeer(pid peer.ID) bool
IsBadPeer(pid peer.ID) error
BadPeers() []peer.ID
}

@@ -124,26 +125,29 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
func (s *Service) IsBadPeer(pid peer.ID) bool {
func (s *Service) IsBadPeer(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
return s.IsBadPeerNoLock(pid)
}

// IsBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
return true
func (s *Service) IsBadPeerNoLock(pid peer.ID) error {
if err := s.scorers.badResponsesScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "bad responses scorer")
}
if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
return true

if err := s.scorers.peerStatusScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "peer status scorer")
}

if features.Get().EnablePeerScorer {
if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
return true
if err := s.scorers.gossipScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "gossip scorer")
}
}
return false

return nil
}

// BadPeers returns the peers that are considered bad by any of registered scorers.
@@ -153,7 +157,7 @@ func (s *Service) BadPeers() []peer.ID {

badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.IsBadPeerNoLock(pid) {
if s.IsBadPeerNoLock(pid) != nil {
badPeers = append(badPeers, pid)
}
}

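The service-level IsBadPeerNoLock now wraps each scorer's error, so the surfaced message names which scorer flagged the peer. A standalone illustration of what that composition produces with github.com/pkg/errors, as used in the diff (this is a sketch, not Prysm code):

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    func main() {
        // Stand-in for badResponsesScorer.isBadPeerNoLock returning a threshold error.
        scorerErr := errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", 6, 6)

        // Service.IsBadPeerNoLock wraps it with the scorer name, as in the diff.
        err := errors.Wrap(scorerErr, "bad responses scorer")

        // Prints: bad responses scorer: peer exceeded bad responses threshold: got 6, threshold 6
        fmt.Println(err)
    }
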
@@ -100,7 +100,7 @@ func TestScorers_Service_Score(t *testing.T) {
return scores
}

pack := func(scorer *scorers.Service, s1, s2, s3 float64) map[string]float64 {
pack := func(_ *scorers.Service, s1, s2, s3 float64) map[string]float64 {
return map[string]float64{
"peer1": roundScore(s1),
"peer2": roundScore(s2),
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")

s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
}()

<-done
assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}

@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
},
})

assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
}

func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
},
})

assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

@@ -34,6 +34,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -49,14 +50,14 @@ import (
)

const (
// PeerDisconnected means there is no connection to the peer.
PeerDisconnected peerdata.PeerConnectionState = iota
// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
PeerDisconnecting
// PeerConnected means the peer has an active connection.
PeerConnected
// PeerConnecting means there is an on-going attempt to connect to the peer.
PeerConnecting
// Disconnected means there is no connection to the peer.
Disconnected peerdata.ConnectionState = iota
// Disconnecting means there is an on-going attempt to disconnect from the peer.
Disconnecting
// Connected means the peer has an active connection.
Connected
// Connecting means there is an on-going attempt to connect to the peer.
Connecting
)

const (
@@ -117,6 +118,15 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
}
}

func (p *Status) UpdateENR(record *enr.Record, pid peer.ID) {
p.store.Lock()
defer p.store.Unlock()

if peerData, ok := p.store.PeerData(pid); ok {
peerData.Enr = record
}
}

// Scorers exposes peer scoring management service.
func (p *Status) Scorers() *scorers.Service {
return p.scorers
@@ -150,7 +160,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
Address: address,
Direction: direction,
// Peers start disconnected; state will be updated when the handshake process begins.
ConnState: PeerDisconnected,
ConnState: Disconnected,
}
if record != nil {
peerData.Enr = record
@@ -212,7 +222,7 @@ func (p *Status) IsActive(pid peer.ID) bool {
defer p.store.RUnlock()

peerData, ok := p.store.PeerData(pid)
return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
return ok && (peerData.ConnState == Connected || peerData.ConnState == Connecting)
}

// IsAboveInboundLimit checks if we are above our current inbound
@@ -222,7 +232,7 @@ func (p *Status) IsAboveInboundLimit() bool {
defer p.store.RUnlock()
totalInbound := 0
for _, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
if peerData.ConnState == Connected &&
peerData.Direction == network.DirInbound {
totalInbound += 1
}
@@ -286,7 +296,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
// look at active peers
connectedStatus := peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected
connectedStatus := peerData.ConnState == Connecting || peerData.ConnState == Connected
if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
for _, idx := range indices {
@@ -301,7 +311,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
}

// SetConnectionState sets the connection state of the given remote peer.
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.ConnectionState) {
p.store.Lock()
defer p.store.Unlock()

@@ -311,14 +321,14 @@ func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionSt

// ConnectionState gets the connection state of the given remote peer.
// This will error if the peer does not exist.
func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
func (p *Status) ConnectionState(pid peer.ID) (peerdata.ConnectionState, error) {
p.store.RLock()
defer p.store.RUnlock()

if peerData, ok := p.store.PeerData(pid); ok {
return peerData.ConnState, nil
}
return PeerDisconnected, peerdata.ErrPeerUnknown
return Disconnected, peerdata.ErrPeerUnknown
}

// ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
@@ -335,19 +345,29 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {

// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
func (p *Status) IsBad(pid peer.ID) error {
p.store.RLock()
defer p.store.RUnlock()

return p.isBad(pid)
}

// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) bool {
func (p *Status) isBad(pid peer.ID) error {
// Do not disconnect from trusted peers.
if p.store.IsTrustedPeer(pid) {
return false
return nil
}
return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)

if err := p.isfromBadIP(pid); err != nil {
return errors.Wrap(err, "peer is from a bad IP")
}

if err := p.scorers.IsBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "is bad peer no lock")
}

return nil
}

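With Status.IsBad returning the wrapped reason, the disconnect path can record why a peer was dropped, which is what the changelog entry about logging the disconnection reason refers to. A hedged, self-contained sketch of such a call site (the helper and its function parameters are hypothetical; only the IsBad signature comes from the diff above):

    package main

    import (
        "log"

        "github.com/libp2p/go-libp2p/core/peer"
    )

    // dropIfBad is a hypothetical helper: isBad stands in for (*peers.Status).IsBad
    // and disconnect for whatever the caller uses to close the connection.
    func dropIfBad(pid peer.ID, isBad func(peer.ID) error, disconnect func(peer.ID) error) {
        err := isBad(pid)
        if err == nil {
            return
        }
        // The wrapped error carries the reason (bad IP, scorer name, threshold values, ...).
        log.Printf("disconnecting from peer %s: %v", pid, err)
        if dErr := disconnect(pid); dErr != nil {
            log.Printf("could not disconnect from peer %s: %v", pid, dErr)
        }
    }
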
// NextValidTime gets the earliest possible time it is to contact/dial
@@ -411,7 +431,7 @@ func (p *Status) Connecting() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnecting {
if peerData.ConnState == Connecting {
peers = append(peers, pid)
}
}
@@ -424,7 +444,7 @@ func (p *Status) Connected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected {
if peerData.ConnState == Connected {
peers = append(peers, pid)
}
}
@@ -450,7 +470,7 @@ func (p *Status) InboundConnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound {
peers = append(peers, pid)
}
}
@@ -463,7 +483,7 @@ func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
@@ -489,7 +509,7 @@ func (p *Status) OutboundConnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound {
peers = append(peers, pid)
}
}
@@ -502,7 +522,7 @@ func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
@@ -515,7 +535,7 @@ func (p *Status) Active() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
if peerData.ConnState == Connecting || peerData.ConnState == Connected {
peers = append(peers, pid)
}
}
@@ -528,7 +548,7 @@ func (p *Status) Disconnecting() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnecting {
if peerData.ConnState == Disconnecting {
peers = append(peers, pid)
}
}
@@ -541,7 +561,7 @@ func (p *Status) Disconnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected {
if peerData.ConnState == Disconnected {
peers = append(peers, pid)
}
}
@@ -554,7 +574,7 @@ func (p *Status) Inactive() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
if peerData.ConnState == Disconnecting || peerData.ConnState == Disconnected {
peers = append(peers, pid)
}
}
@@ -592,7 +612,7 @@ func (p *Status) Prune() {
return
}
notBadPeer := func(pid peer.ID) bool {
return !p.isBad(pid)
return p.isBad(pid) == nil
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
@@ -605,7 +625,7 @@ func (p *Status) Prune() {
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune trusted peer or prune the peer dara and unset trusted peer.
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
if peerData.ConnState == Disconnected && notBadPeer(pid) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),
@@ -657,7 +677,7 @@ func (p *Status) deprecatedPrune() {
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Should not prune trusted peer or prune the peer dara and unset trusted peer.
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
if peerData.ConnState == Disconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
@@ -814,7 +834,7 @@ func (p *Status) PeersToPrune() []peer.ID {
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
if peerData.ConnState == Connected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
@@ -880,7 +900,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
if peerData.ConnState == Connected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
@@ -982,24 +1002,28 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {

// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {
func (p *Status) isfromBadIP(pid peer.ID) error {
peerData, ok := p.store.PeerData(pid)
if !ok {
return false
return nil
}

if peerData.Address == nil {
return false
return nil
}

ip, err := manet.ToIP(peerData.Address)
if err != nil {
return true
return errors.Wrap(err, "to ip")
}

if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return true
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
}
}
return false

return nil
}

func (p *Status) addIpToTracker(pid peer.ID) {

@@ -215,7 +215,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
// Add some peers with different states
numPeers := 2
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
expectedPeer := p.All()[1]
bitV := bitfield.NewBitvector64()
@@ -230,7 +230,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
}))
numPeers = 3
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.PeerDisconnected)
addPeer(t, p, peers.Disconnected)
}
ps := p.SubscribedToSubnet(2)
assert.Equal(t, 1, len(ps), "Unexpected num of peers")
@@ -259,7 +259,7 @@ func TestPeerImplicitAdd(t *testing.T) {
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)

connectionState := peers.PeerConnecting
connectionState := peers.Connecting
p.SetConnectionState(id, connectionState)

resConnectionState, err := p.ConnectionState(id)
@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
require.NoError(t, err)
}

assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")

scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}

func TestAddMetaData(t *testing.T) {
@@ -393,7 +393,7 @@ func TestAddMetaData(t *testing.T) {
// Add some peers with different states
numPeers := 5
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
newPeer := p.All()[2]

@@ -422,19 +422,19 @@ func TestPeerConnectionStatuses(t *testing.T) {
// Add some peers with different states
numPeersDisconnected := 11
for i := 0; i < numPeersDisconnected; i++ {
addPeer(t, p, peers.PeerDisconnected)
addPeer(t, p, peers.Disconnected)
}
numPeersConnecting := 7
for i := 0; i < numPeersConnecting; i++ {
addPeer(t, p, peers.PeerConnecting)
addPeer(t, p, peers.Connecting)
}
numPeersConnected := 43
for i := 0; i < numPeersConnected; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}
numPeersDisconnecting := 4
for i := 0; i < numPeersDisconnecting; i++ {
addPeer(t, p, peers.PeerDisconnecting)
addPeer(t, p, peers.Disconnecting)
}

// Now confirm the states
@@ -463,7 +463,7 @@ func TestPeerValidTime(t *testing.T) {

numPeersConnected := 6
for i := 0; i < numPeersConnected; i++ {
addPeer(t, p, peers.PeerConnected)
addPeer(t, p, peers.Connected)
}

allPeers := p.All()
@@ -510,10 +510,10 @@ func TestPrune(t *testing.T) {
for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.PeerDisconnected)
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.PeerConnected)
_ = addPeer(t, p, peers.Connected)
}

disPeers := p.Disconnected()
@@ -571,23 +571,23 @@ func TestPeerIPTracker(t *testing.T) {
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}

// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.PeerDisconnected)
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()

for _, pr := range badPeers {
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}

@@ -601,8 +601,11 @@ func TestTrimmedOrderedPeers(t *testing.T) {
},
})

expectedTarget := primitives.Epoch(2)
maxPeers := 3
const (
expectedTarget = primitives.Epoch(2)
maxPeers = 3
)

var mockroot2 [32]byte
var mockroot3 [32]byte
var mockroot4 [32]byte
@@ -611,36 +614,41 @@ func TestTrimmedOrderedPeers(t *testing.T) {
copy(mockroot3[:], "three")
copy(mockroot4[:], "four")
copy(mockroot5[:], "five")

// Peer 1
pid1 := addPeer(t, p, peers.PeerConnected)
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.Status{
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 3,
FinalizedRoot: mockroot3[:],
})

// Peer 2
pid2 := addPeer(t, p, peers.PeerConnected)
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.Status{
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 4,
FinalizedRoot: mockroot4[:],
})

// Peer 3
pid3 := addPeer(t, p, peers.PeerConnected)
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.Status{
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 5,
FinalizedRoot: mockroot5[:],
})

// Peer 4
pid4 := addPeer(t, p, peers.PeerConnected)
pid4 := addPeer(t, p, peers.Connected)
p.SetChainState(pid4, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
})

// Peer 5
pid5 := addPeer(t, p, peers.PeerConnected)
pid5 := addPeer(t, p, peers.Connected)
p.SetChainState(pid5, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
@@ -680,12 +688,12 @@ func TestAtInboundPeerLimit(t *testing.T) {
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
assert.Equal(t, false, p.IsAboveInboundLimit(), "Inbound limit exceeded")
for i := 0; i < 31; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
assert.Equal(t, true, p.IsAboveInboundLimit(), "Inbound limit not exceeded")
}
@@ -705,7 +713,7 @@ func TestPrunePeers(t *testing.T) {
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
@@ -713,7 +721,7 @@ func TestPrunePeers(t *testing.T) {

for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}

// Assert there are the correct prunable peers.
@@ -723,7 +731,7 @@ func TestPrunePeers(t *testing.T) {
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}

// Set up bad scores for inbound peers.
@@ -767,7 +775,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {

for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
@@ -775,7 +783,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {

for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}

// Assert there are the correct prunable peers.
@@ -785,7 +793,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}

var trustedPeers []peer.ID
@@ -821,7 +829,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
for i := 0; i < 9; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
}

// Delete trusted peers.
@@ -865,14 +873,14 @@ func TestStatus_BestPeer(t *testing.T) {
headSlot primitives.Slot
finalizedEpoch primitives.Epoch
}

tests := []struct {
name string
peers []*peerConfig
limitPeers int
ourFinalizedEpoch primitives.Epoch
targetEpoch primitives.Epoch
// targetEpochSupport denotes how many peers support returned epoch.
targetEpochSupport int
name string
peers []*peerConfig
limitPeers int
ourFinalizedEpoch primitives.Epoch
targetEpoch primitives.Epoch
targetEpochSupport int // Denotes how many peers support returned epoch.
}{
{
name: "head slot matches finalized epoch",
@@ -885,6 +893,7 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -902,6 +911,7 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -916,6 +926,7 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -930,8 +941,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 15,
ourFinalizedEpoch: 5,
targetEpoch: 6,
targetEpochSupport: 1,
},
@@ -950,8 +961,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 15,
ourFinalizedEpoch: 5,
targetEpoch: 6,
targetEpochSupport: 5,
},
@@ -970,8 +981,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 4,
ourFinalizedEpoch: 5,
targetEpoch: 6,
targetEpochSupport: 4,
},
@@ -986,8 +997,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
ourFinalizedEpoch: 5,
limitPeers: 15,
ourFinalizedEpoch: 5,
targetEpoch: 8,
targetEpochSupport: 3,
},
@@ -1002,7 +1013,7 @@ func TestStatus_BestPeer(t *testing.T) {
},
})
for _, peerConfig := range tt.peers {
p.SetChainState(addPeer(t, p, peers.PeerConnected), &pb.Status{
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
FinalizedEpoch: peerConfig.finalizedEpoch,
HeadSlot: peerConfig.headSlot,
})
@@ -1028,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {

for i := 0; i <= maxPeers+100; i++ {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
FinalizedEpoch: 10,
})
@@ -1051,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
peerSlots := []primitives.Slot{32, 32, 32, 32, 235, 233, 258, 268, 270}
for i, headSlot := range peerSlots {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
HeadSlot: headSlot,
})
@@ -1074,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
},
})
// Peer 1
pid1 := addPeer(t, p, peers.PeerConnected)
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
// Peer 2
pid2 := addPeer(t, p, peers.PeerConnected)
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
})
// Peer 3
pid3 := addPeer(t, p, peers.PeerConnected)
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
@@ -1103,8 +1114,8 @@ func TestInbound(t *testing.T) {
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
createPeer(t, p, addr, network.DirOutbound, peers.Connected)

result := p.Inbound()
require.Equal(t, 1, len(result))
@@ -1123,8 +1134,8 @@ func TestInboundConnected(t *testing.T) {

addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
createPeer(t, p, addr, network.DirInbound, peers.Connecting)

result := p.InboundConnected()
require.Equal(t, 1, len(result))
@@ -1157,7 +1168,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
expectedTCP[peer.String()] = true
}

@@ -1166,7 +1177,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
expectedQUIC[peer.String()] = true
}

@@ -1203,8 +1214,8 @@ func TestOutbound(t *testing.T) {
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
outbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.Connected)
outbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)

result := p.Outbound()
require.Equal(t, 1, len(result))
@@ -1223,8 +1234,8 @@ func TestOutboundConnected(t *testing.T) {

addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
createPeer(t, p, addr, network.DirOutbound, peers.Connecting)

result := p.OutboundConnected()
require.Equal(t, 1, len(result))
@@ -1257,7 +1268,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
expectedTCP[peer.String()] = true
}

@@ -1266,7 +1277,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
expectedQUIC[peer.String()] = true
}

@@ -1293,7 +1304,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
}

// addPeer is a helper to add a peer with a given connection state)
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState) peer.ID {
// Set up some peers with different states
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
@@ -1312,7 +1323,7 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
}

func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
dir network.Direction, state peerdata.PeerConnectionState) peer.ID {
dir network.Direction, state peerdata.ConnectionState) peer.ID {
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
_, err := rand.Read(idBytes)

@@ -10,15 +10,25 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/network/forks"
"github.com/sirupsen/logrus"
)

var _ pubsub.SubscriptionFilter = (*Service)(nil)

// It is set at this limit to handle the possibility
// of double topic subscriptions at fork boundaries.
// -> 64 Attestation Subnets * 2.
// -> 4 Sync Committee Subnets * 2.
// -> Block,Aggregate,ProposerSlashing,AttesterSlashing,Exits,SyncContribution * 2.
// -> BeaconBlock * 2 = 2
// -> BeaconAggregateAndProof * 2 = 2
// -> VoluntaryExit * 2 = 2
// -> ProposerSlashing * 2 = 2
// -> AttesterSlashing * 2 = 2
// -> 64 Beacon Attestation * 2 = 128
// -> SyncContributionAndProof * 2 = 2
// -> 4 SyncCommitteeSubnets * 2 = 8
// -> BlsToExecutionChange * 2 = 2
// -> 6 BlobSidecar * 2 = 12
// -------------------------------------
// TOTAL = 162
const pubsubSubscriptionRequestLimit = 200

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
@@ -95,8 +105,15 @@ func (s *Service) CanSubscribe(topic string) bool {
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
// This method returns only the topics of interest and may return an error if the subscription
// request contains too many topics.
func (s *Service) FilterIncomingSubscriptions(_ peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
if len(subs) > pubsubSubscriptionRequestLimit {
subsCount := len(subs)
log.WithFields(logrus.Fields{
"peerID": peerID,
"subscriptionCounts": subsCount,
"subscriptionLimit": pubsubSubscriptionRequestLimit,
}).Debug("Too many incoming subscriptions, filtering them")

return nil, pubsub.ErrTooManySubscriptions
}

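The reworked comment enumerates why the 200-topic limit leaves headroom over the worst case at a fork boundary, where every topic family is subscribed twice. A small standalone check of the same arithmetic (the per-family counts are taken from the comment above, not computed from Prysm config):

    package main

    import "fmt"

    func main() {
        // Topic families per fork digest, per the comment above.
        perForkTopics := 1 /* BeaconBlock */ +
            1 /* BeaconAggregateAndProof */ +
            1 /* VoluntaryExit */ +
            1 /* ProposerSlashing */ +
            1 /* AttesterSlashing */ +
            64 /* Beacon Attestation subnets */ +
            1 /* SyncContributionAndProof */ +
            4 /* SyncCommittee subnets */ +
            1 /* BlsToExecutionChange */ +
            6 /* BlobSidecar */
        total := perForkTopics * 2 // doubled at a fork boundary
        fmt.Println(total)         // 162, safely under the 200 request limit
    }
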
@@ -43,6 +43,10 @@ var _ runtime.Service = (*Service)(nil)
// defined below.
var pollingPeriod = 6 * time.Second

// When looking for new nodes, if not enough nodes are found,
// we stop after this amount of iterations.
var batchSize = 2_000

// Refresh rate of ENR set at twice per slot.
var refreshRate = slots.DivideSlotBy(2)

@@ -202,12 +206,13 @@ func (s *Service) Start() {
s.startupErr = err
return
}
err = s.connectToBootnodes()
if err != nil {
log.WithError(err).Error("Could not add bootnode to the exclusion list")

if err := s.connectToBootnodes(); err != nil {
log.WithError(err).Error("Could not connect to boot nodes")
s.startupErr = err
return
}

s.dv5Listener = listener
go s.listenForNewNodes()
}
@@ -226,7 +231,7 @@ func (s *Service) Start() {
}
// Initialize metadata according to the
// current epoch.
s.RefreshENR()
s.RefreshPersistentSubnets()

// Periodic functions.
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
@@ -234,7 +239,7 @@ func (s *Service) Start() {
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
async.RunEvery(s.ctx, 1*time.Minute, func() {
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
@@ -384,12 +389,17 @@ func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) er
s.pingMethodLock.Unlock()
}

func (s *Service) pingPeers() {
func (s *Service) pingPeersAndLogEnr() {
s.pingMethodLock.RLock()
defer s.pingMethodLock.RUnlock()

localENR := s.dv5Listener.Self()
log.WithField("ENR", localENR).Info("New node record")

if s.pingMethod == nil {
return
}

for _, pid := range s.peers.Connected() {
go func(id peer.ID) {
if err := s.pingMethod(s.ctx, id); err != nil {
@@ -462,8 +472,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
if info.ID == s.host.ID() {
return nil
}
if s.Peers().IsBad(info.ID) {
return errors.New("refused to connect to bad peer")
if err := s.Peers().IsBad(info.ID); err != nil {
return errors.Wrap(err, "refused to connect to bad peer")
}
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()

@@ -2,6 +2,7 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -19,22 +20,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
mathutil "github.com/prysmaticlabs/prysm/v5/math"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
|
||||
var syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
var (
|
||||
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
|
||||
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
var attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
|
||||
var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
|
||||
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
)
|
||||
|
||||
// The value used with the subnet, in order
|
||||
// to create an appropriate key to retrieve
|
||||
// the relevant lock. This is used to differentiate
|
||||
// sync subnets from attestation subnets. This is deliberately
|
||||
// chosen as more than 64(attestation subnet count).
|
||||
// sync subnets from others. This is deliberately
|
||||
// chosen as more than 64 (attestation subnet count).
|
||||
const syncLockerVal = 100
|
||||
|
||||
// The value used with the blob sidecar subnet, in order
|
||||
@@ -44,6 +47,77 @@ const syncLockerVal = 100
|
||||
// chosen more than sync and attestation subnet combined.
|
||||
const blobSubnetLockerVal = 110
|
||||
|
||||
// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) {
	switch {
	case strings.Contains(topic, GossipAttestationMessage):
		return s.filterPeerForAttSubnet(index), nil
	case strings.Contains(topic, GossipSyncCommitteeMessage):
		return s.filterPeerForSyncSubnet(index), nil
	default:
		return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
	}
}

// searchForPeers performs a network search for peers subscribed to a particular subnet.
// It exits as soon as one of these conditions is met:
// - It looped through `batchSize` nodes.
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
// - The iterator is exhausted.
func searchForPeers(
	iterator enode.Iterator,
	batchSize int,
	peersToFindCount uint,
	filter func(node *enode.Node) bool,
) []*enode.Node {
	nodeFromNodeID := make(map[enode.ID]*enode.Node, batchSize)
	for i := 0; i < batchSize && uint(len(nodeFromNodeID)) <= peersToFindCount && iterator.Next(); i++ {
		node := iterator.Node()

		// Filter out nodes that do not meet the criteria.
		if !filter(node) {
			continue
		}

		// Remove duplicates, keeping the node with the higher seq.
		prevNode, ok := nodeFromNodeID[node.ID()]
		if ok && prevNode.Seq() > node.Seq() {
			continue
		}

		nodeFromNodeID[node.ID()] = node
	}

	// Convert the map to a slice.
	nodes := make([]*enode.Node, 0, len(nodeFromNodeID))
	for _, node := range nodeFromNodeID {
		nodes = append(nodes, node)
	}

	return nodes
}
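// Illustrative only (not part of the diff): a minimal sketch of how searchForPeers could
// be driven from the discv5 iterator with a subnet filter. The receiver `s`, `topic`,
// `index`, `missing` (the number of peers still needed) and `wg` are assumed from the
// surrounding code in this file.
//
//   iterator := s.dv5Listener.RandomNodes()
//   defer iterator.Close()
//
//   filter, err := s.nodeFilter(topic, index)
//   if err != nil {
//       return errors.Wrap(err, "node filter")
//   }
//
//   // Inspect up to `batchSize` nodes and keep at most `missing` matching ones,
//   // deduplicated by node ID (highest ENR sequence number wins).
//   nodes := searchForPeers(iterator, batchSize, uint(missing), filter)
//   for _, node := range nodes {
//       s.dialPeer(ctx, wg, node)
//   }
//   wg.Wait()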
|
||||
|
||||
// dialPeer dials a peer in a separate goroutine.
|
||||
func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.Node) {
|
||||
info, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
return
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := s.connectWithPeer(ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
@@ -52,67 +126,104 @@ const blobSubnetLockerVal = 110
// In some edge cases, this method may hang indefinitely even while peers
// are actually being found. In such a case, the user should cancel the context
// and re-run the method.
func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
	index uint64, threshold int) (bool, error) {
func (s *Service) FindPeersWithSubnet(
	ctx context.Context,
	topic string,
	index uint64,
	threshold int,
) (bool, error) {
|
||||
const minLogInterval = 1 * time.Minute
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
span.SetAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
|
||||
if s.dv5Listener == nil {
|
||||
// return if discovery isn't set
|
||||
// Return if discovery isn't set
|
||||
return false, nil
|
||||
}
|
||||
|
||||
topic += s.Encoding().ProtocolSuffix()
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
defer iterator.Close()
|
||||
switch {
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
|
||||
default:
|
||||
return false, errors.New("no subnet exists for provided topic")
|
||||
|
||||
filter, err := s.nodeFilter(topic, index)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "node filter")
|
||||
}
|
||||
|
||||
peersSummary := func(topic string, threshold int) (int, int) {
|
||||
// Retrieve how many peers we have for this topic.
|
||||
peerCountForTopic := len(s.pubsub.ListPeers(topic))
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)
|
||||
|
||||
return peerCountForTopic, missingPeerCountForTopic
|
||||
}
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
|
||||
// Exit early if we have enough peers.
|
||||
if missingPeerCountForTopic == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"targetPeerCount": threshold,
|
||||
})
|
||||
|
||||
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")
|
||||
|
||||
lastLogTime := time.Now()
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for {
|
||||
currNum := len(s.pubsub.ListPeers(topic))
|
||||
if currNum >= threshold {
|
||||
// If the context is done, we can exit the loop. This is the unhappy path.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return false, errors.Errorf(
|
||||
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
|
||||
topic, peerCountForTopic, threshold,
|
||||
)
|
||||
}
|
||||
|
||||
// Search for new peers in the network.
|
||||
nodes := searchForPeers(iterator, batchSize, uint(missingPeerCountForTopic), filter)
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
|
||||
// Dial the peers in batches.
|
||||
for start := 0; start < len(nodes); start += maxConcurrentDials {
|
||||
stop := min(start+maxConcurrentDials, len(nodes))
|
||||
for _, node := range nodes[start:stop] {
|
||||
s.dialPeer(ctx, wg, node)
|
||||
}
|
||||
|
||||
// Wait for all dials to be completed.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
|
||||
// If we have enough peers, we can exit the loop. This is the happy path.
|
||||
if missingPeerCountForTopic == 0 {
|
||||
break
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
|
||||
"only %d out of %d peers were able to be found", topic, currNum, threshold)
|
||||
}
|
||||
nodeCount := int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch)
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
nodeCount = min(nodeCount, flags.Get().MaxConcurrentDials)
|
||||
}
|
||||
nodes := enode.ReadNodes(iterator, nodeCount)
|
||||
for _, node := range nodes {
|
||||
info, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := s.connectWithPeer(ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
if time.Since(lastLogTime) > minLogInterval {
|
||||
lastLogTime = time.Now()
|
||||
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
|
||||
}
|
||||
// Wait for all dials to be completed.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
|
||||
return true, nil
|
||||
}
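// Illustrative only (not part of the diff): because FindPeersWithSubnet can block until the
// threshold is reached or the context is cancelled, callers would typically bound the search
// with a timeout, as the doc comment above suggests. `topic`, `subnetIndex` and `threshold`
// are placeholders.
//
//   searchCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
//   defer cancel()
//
//   ok, err := s.FindPeersWithSubnet(searchCtx, topic, subnetIndex, threshold)
//   if err != nil {
//       log.WithError(err).Debug("Subnet peer search ended before reaching the threshold")
//   }
//   _ = ok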
|
||||
|
||||
@@ -156,11 +267,17 @@ func (s *Service) filterPeerForSyncSubnet(index uint64) func(node *enode.Node) b
|
||||
// lower threshold to broadcast object compared to searching
// for a subnet. So that even in the event of poor peer
// connectivity, we can still broadcast an attestation.
func (s *Service) hasPeerWithSubnet(topic string) bool {
func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
	// In the event peer threshold is lower, we will choose the lower
	// threshold.
	minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
	return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
	minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
	topic := subnetTopic + s.Encoding().ProtocolSuffix()
	peersWithSubnet := s.pubsub.ListPeers(topic)
	peersWithSubnetCount := len(peersWithSubnet)

	enoughPeers := peersWithSubnetCount >= minPeers

	return enoughPeers
}
|
||||
|
||||
// Updates the service's discv5 listener record's attestation subnet
|
||||
@@ -355,10 +472,10 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {
|
||||
|
||||
// The subnet locker is a map which keeps track of all
// mutexes stored per subnet. This locker is re-used
// between both the attestation and sync subnets. In
// order to differentiate between attestation and sync
// subnets. Sync subnets are stored by (subnet+syncLockerVal). This
// is to prevent conflicts while allowing both subnets
// between both the attestation, sync and blob subnets.
// Sync subnets are stored by (subnet+syncLockerVal).
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
// This is to prevent conflicts while allowing subnets
// to use a single locker.
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
	s.subnetsLockLock.Lock()

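// Illustrative only (not part of the diff): how locker keys are derived so the three subnet
// families share one map without colliding. syncLockerVal (100) and blobSubnetLockerVal (110)
// come from the constants above; the index values are arbitrary examples.
//
//   attKey  := uint64(5)                        // attestation subnet 5  -> key 5
//   syncKey := uint64(2) + syncLockerVal        // sync subnet 2         -> key 102
//   blobKey := uint64(3) + blobSubnetLockerVal  // blob subnet 3         -> key 113
//
//   lock := s.subnetLocker(syncKey)
//   lock.RLock()
//   defer lock.RUnlock()
//   _, _ = attKey, blobKey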
@@ -27,6 +27,7 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//config:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/control:go_default_library",
|
||||
|
||||
@@ -65,8 +65,8 @@ func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (*FakeP2P) RefreshENR() {}
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*FakeP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
// LeaveTopic -- fake.
|
||||
func (*FakeP2P) LeaveTopic(_ string) error {
|
||||
|
||||
@@ -20,7 +20,7 @@ type MockPeerManager struct {
|
||||
}
|
||||
|
||||
// Disconnect .
|
||||
func (_ *MockPeerManager) Disconnect(peer.ID) error {
|
||||
func (*MockPeerManager) Disconnect(peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -35,25 +35,25 @@ func (m *MockPeerManager) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR .
|
||||
func (m MockPeerManager) ENR() *enr.Record {
|
||||
func (m *MockPeerManager) ENR() *enr.Record {
|
||||
return m.Enr
|
||||
}
|
||||
|
||||
// DiscoveryAddresses .
|
||||
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if m.FailDiscoveryAddr {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
return m.DiscoveryAddr, nil
|
||||
}
|
||||
|
||||
// RefreshENR .
|
||||
func (_ MockPeerManager) RefreshENR() {}
|
||||
// RefreshPersistentSubnets .
|
||||
func (*MockPeerManager) RefreshPersistentSubnets() {}
|
||||
|
||||
// FindPeersWithSubnet .
|
||||
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// AddPingMethod .
|
||||
func (_ MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
func (*MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
|
||||
@@ -64,7 +64,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
log.WithError(err).Debug("Cannot decode")
|
||||
}
|
||||
m.peers.Add(createENR(), id0, ma0, network.DirInbound)
|
||||
m.peers.SetConnectionState(id0, peers.PeerConnected)
|
||||
m.peers.SetConnectionState(id0, peers.Connected)
|
||||
m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
|
||||
id1, err := peer.Decode(MockRawPeerId1)
|
||||
if err != nil {
|
||||
@@ -75,7 +75,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
log.WithError(err).Debug("Cannot decode")
|
||||
}
|
||||
m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
|
||||
m.peers.SetConnectionState(id1, peers.PeerConnected)
|
||||
m.peers.SetConnectionState(id1, peers.Connected)
|
||||
m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
|
||||
}
|
||||
return m.peers
|
||||
|
||||
@@ -10,9 +10,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/config"
|
||||
core "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/control"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
@@ -34,13 +36,17 @@ import (
|
||||
|
||||
// We have to declare this again here to prevent a circular dependency
|
||||
// with the main p2p package.
|
||||
const metatadataV1Topic = "/eth2/beacon_chain/req/metadata/1"
|
||||
const metatadataV2Topic = "/eth2/beacon_chain/req/metadata/2"
|
||||
const (
|
||||
metadataV1Topic = "/eth2/beacon_chain/req/metadata/1"
|
||||
metadataV2Topic = "/eth2/beacon_chain/req/metadata/2"
|
||||
metadataV3Topic = "/eth2/beacon_chain/req/metadata/3"
|
||||
)
|
||||
|
||||
// TestP2P represents a p2p implementation that can be used for testing.
|
||||
type TestP2P struct {
|
||||
t *testing.T
|
||||
BHost host.Host
|
||||
EnodeID enode.ID
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
BroadcastCalled atomic.Bool
|
||||
@@ -51,9 +57,17 @@ type TestP2P struct {
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
func NewTestP2P(t *testing.T) *TestP2P {
|
||||
func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
|
||||
ctx := context.Background()
|
||||
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}), libp2p.Transport(tcp.NewTCPTransport), libp2p.DefaultListenAddrs)
|
||||
options := []config.Option{
|
||||
libp2p.ResourceManager(&network.NullResourceManager{}),
|
||||
libp2p.Transport(tcp.NewTCPTransport),
|
||||
libp2p.DefaultListenAddrs,
|
||||
}
|
||||
|
||||
options = append(options, userOptions...)
|
||||
|
||||
h, err := libp2p.New(options...)
|
||||
require.NoError(t, err)
|
||||
ps, err := pubsub.NewFloodSub(ctx, h,
|
||||
pubsub.WithMessageSigning(false),
|
||||
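// Illustrative only (not part of the diff): the new variadic parameter lets tests pass extra
// libp2p options through to the underlying host; they are appended after the defaults shown
// above. The specific option used here is just one example.
//
//   p := NewTestP2P(t, libp2p.DisableRelay())
//   _ = p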
@@ -239,7 +253,7 @@ func (p *TestP2P) LeaveTopic(topic string) error {
|
||||
}
|
||||
|
||||
// Encoding returns ssz encoding.
|
||||
func (_ *TestP2P) Encoding() encoder.NetworkEncoding {
|
||||
func (*TestP2P) Encoding() encoder.NetworkEncoding {
|
||||
return &encoder.SszNetworkEncoder{}
|
||||
}
|
||||
|
||||
@@ -266,34 +280,39 @@ func (p *TestP2P) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (_ *TestP2P) ENR() *enr.Record {
|
||||
func (*TestP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
}
|
||||
|
||||
// NodeID returns the node id of the local peer.
|
||||
func (p *TestP2P) NodeID() enode.ID {
|
||||
return p.EnodeID
|
||||
}
|
||||
|
||||
// DiscoveryAddresses --
|
||||
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// AddConnectionHandler handles the connection with a newly connected peer.
|
||||
func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID) error) {
|
||||
p.BHost.Network().Notify(&network.NotifyBundle{
|
||||
ConnectedF: func(net network.Network, conn network.Conn) {
|
||||
ConnectedF: func(_ network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction)
|
||||
ctx := context.Background()
|
||||
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
|
||||
if err := f(ctx, conn.RemotePeer()); err != nil {
|
||||
logrus.WithError(err).Error("Could not send successful hello rpc request")
|
||||
if err := p.Disconnect(conn.RemotePeer()); err != nil {
|
||||
logrus.WithError(err).Errorf("Unable to close peer %s", conn.RemotePeer())
|
||||
}
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnected)
|
||||
return
|
||||
}
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
|
||||
}()
|
||||
},
|
||||
})
|
||||
@@ -302,14 +321,14 @@ func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID
|
||||
// AddDisconnectionHandler --
|
||||
func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error) {
|
||||
p.BHost.Network().Notify(&network.NotifyBundle{
|
||||
DisconnectedF: func(net network.Network, conn network.Conn) {
|
||||
DisconnectedF: func(_ network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnecting)
|
||||
if err := f(context.Background(), conn.RemotePeer()); err != nil {
|
||||
logrus.WithError(err).Debug("Unable to invoke callback")
|
||||
}
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnected)
|
||||
}()
|
||||
},
|
||||
})
|
||||
@@ -317,6 +336,8 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID
|
||||
|
||||
// Send a message to a specific peer.
|
||||
func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid peer.ID) (network.Stream, error) {
|
||||
metadataTopics := map[string]bool{metadataV1Topic: true, metadataV2Topic: true, metadataV3Topic: true}
|
||||
|
||||
t := topic
|
||||
if t == "" {
|
||||
return nil, fmt.Errorf("protocol doesn't exist for proto message: %v", msg)
|
||||
@@ -326,7 +347,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if topic != metatadataV1Topic && topic != metatadataV2Topic {
|
||||
if !metadataTopics[topic] {
|
||||
castedMsg, ok := msg.(ssz.Marshaler)
|
||||
if !ok {
|
||||
p.t.Fatalf("%T doesn't support ssz marshaler", msg)
|
||||
@@ -353,7 +374,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
|
||||
}
|
||||
|
||||
// Started always returns true.
|
||||
func (_ *TestP2P) Started() bool {
|
||||
func (*TestP2P) Started() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -363,12 +384,12 @@ func (p *TestP2P) Peers() *peers.Status {
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet mocks the p2p func.
|
||||
func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (_ *TestP2P) RefreshENR() {}
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*TestP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
// ForkDigest mocks the p2p func.
|
||||
func (p *TestP2P) ForkDigest() ([4]byte, error) {
|
||||
@@ -386,31 +407,31 @@ func (p *TestP2P) MetadataSeq() uint64 {
|
||||
}
|
||||
|
||||
// AddPingMethod mocks the p2p func.
|
||||
func (_ *TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// InterceptPeerDial .
|
||||
func (_ *TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
func (*TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAddrDial .
|
||||
func (_ *TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
func (*TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAccept .
|
||||
func (_ *TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
func (*TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptSecured .
|
||||
func (_ *TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
func (*TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptUpgraded .
|
||||
func (_ *TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
return true, 0
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"endpoints.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc",
|
||||
|
||||
@@ -34,18 +34,48 @@ type endpoint struct {
|
||||
methods []string
|
||||
}
|
||||
|
||||
// responseWriter wraps the standard http.ResponseWriter to capture the status code.
type responseWriter struct {
	http.ResponseWriter
	statusCode int
}

// WriteHeader wraps the WriteHeader method of the underlying http.ResponseWriter to capture the status code.
// See the WriteHeader documentation: https://pkg.go.dev/net/http@go1.23.3#ResponseWriter.
func (w *responseWriter) WriteHeader(statusCode int) {
	w.statusCode = statusCode
	w.ResponseWriter.WriteHeader(statusCode)
}

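// Illustrative only (not part of the diff): a minimal check of the wrapper using only the
// standard library. Handlers that never call WriteHeader leave the preset 200 in place.
//
//   rec := httptest.NewRecorder()
//   rw := &responseWriter{ResponseWriter: rec, statusCode: http.StatusOK}
//
//   http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
//       w.WriteHeader(http.StatusTeapot)
//   }).ServeHTTP(rw, httptest.NewRequest(http.MethodGet, "/", nil))
//
//   // rw.statusCode is now http.StatusTeapot (418); rec still holds the headers and body.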
func (e *endpoint) handlerWithMiddleware() http.HandlerFunc {
	handler := http.Handler(e.handler)
	for _, m := range e.middleware {
		handler = m(handler)
	}
	return promhttp.InstrumentHandlerDuration(

	handler = promhttp.InstrumentHandlerDuration(
		httpRequestLatency.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
		promhttp.InstrumentHandlerCounter(
			httpRequestCount.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
			handler,
		),
	)

	return func(w http.ResponseWriter, r *http.Request) {
		// SSE errors are handled separately to avoid interference with the streaming
		// mechanism and ensure accurate error tracking.
		if e.template == "/eth/v1/events" {
			handler.ServeHTTP(w, r)
			return
		}

		rw := &responseWriter{ResponseWriter: w, statusCode: http.StatusOK}
		handler.ServeHTTP(rw, r)

		if rw.statusCode >= 400 {
			httpErrorCount.WithLabelValues(r.URL.Path, http.StatusText(rw.statusCode), r.Method).Inc()
		}
	}
}
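// Illustrative only (not part of the diff): one way the error counter referenced above could
// be declared. The metric name and label names here are assumptions, not the actual
// definitions from metrics.go; only the label count (path, status text, method) follows from
// the WithLabelValues call in the handler.
//
//   var httpErrorCount = promauto.NewCounterVec(
//       prometheus.CounterOpts{
//           Name: "http_error_count",
//           Help: "Number of HTTP responses with a status code >= 400.",
//       },
//       []string{"endpoint", "code", "method"},
//   )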
|
||||
|
||||
func (s *Service) endpoints(
|
||||
@@ -142,9 +172,11 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
func (*Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
server := &blob.Server{
|
||||
Blocker: blocker,
|
||||
Blocker: blocker,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
FinalizationFetcher: s.cfg.FinalizationFetcher,
|
||||
}
|
||||
|
||||
const namespace = "blob"
|
||||
|
||||
@@ -87,7 +87,7 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
|
||||
// ListAttestationsV2 retrieves attestations known by the node but
|
||||
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
|
||||
func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
|
||||
_, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
rawSlot, slot, ok := shared.UintFromQuery(w, r, "slot", false)
|
||||
@@ -98,13 +98,10 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
v := slots.ToForkVersion(primitives.Slot(slot))
|
||||
if rawSlot == "" {
|
||||
v = slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
|
||||
}
|
||||
|
||||
attestations := s.AttestationsPool.AggregatedAttestations()
|
||||
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
|
||||
if err != nil {
|
||||
@@ -116,7 +113,7 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
filteredAtts := make([]interface{}, 0, len(attestations))
|
||||
for _, att := range attestations {
|
||||
var includeAttestation bool
|
||||
if headState.Version() >= version.Electra {
|
||||
if v >= version.Electra && att.Version() >= version.Electra {
|
||||
attElectra, ok := att.(*eth.AttestationElectra)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
|
||||
@@ -128,7 +125,7 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
attStruct := structs.AttElectraFromConsensus(attElectra)
|
||||
filteredAtts = append(filteredAtts, attStruct)
|
||||
}
|
||||
} else {
|
||||
} else if v < version.Electra && att.Version() < version.Electra {
|
||||
attOld, ok := att.(*eth.Attestation)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
|
||||
@@ -149,9 +146,9 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
|
||||
w.Header().Set(api.VersionHeader, version.String(v))
|
||||
httputil.WriteJson(w, &structs.ListAttestationsResponse{
|
||||
Version: version.String(headState.Version()),
|
||||
Version: version.String(v),
|
||||
Data: attsData,
|
||||
})
|
||||
}
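// Illustrative only (not part of the diff): the handler above now derives the fork version
// from the requested slot instead of the head state, falling back to the current slot when
// no slot is supplied, and filters attestations so the payload matches that version.
//
//   v := slots.ToForkVersion(primitives.Slot(slot))
//   if rawSlot == "" {
//       v = slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
//   }
//   // Electra and later attestations are serialized as AttestationElectra, earlier ones as
//   // the phase0 Attestation, and version.String(v) is advertised both in the JSON body and
//   // in the consensus-version response header.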
|
||||
@@ -726,31 +723,33 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetAttesterSlashingsV2")
|
||||
defer span.End()
|
||||
|
||||
v := slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
var attStructs []interface{}
|
||||
sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
|
||||
|
||||
for _, slashing := range sourceSlashings {
|
||||
var attStruct interface{}
|
||||
if headState.Version() >= version.Electra {
|
||||
if v >= version.Electra && slashing.Version() >= version.Electra {
|
||||
a, ok := slashing.(*eth.AttesterSlashingElectra)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to an Electra slashing", slashing), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
attStruct = structs.AttesterSlashingElectraFromConsensus(a)
|
||||
} else {
|
||||
} else if v < version.Electra && slashing.Version() < version.Electra {
|
||||
a, ok := slashing.(*eth.AttesterSlashing)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to a Phase0 slashing", slashing), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
attStruct = structs.AttesterSlashingFromConsensus(a)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
attStructs = append(attStructs, attStruct)
|
||||
}
|
||||
@@ -762,10 +761,10 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
resp := &structs.GetAttesterSlashingsResponse{
|
||||
Version: version.String(headState.Version()),
|
||||
Version: version.String(v),
|
||||
Data: attBytes,
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
|
||||
w.Header().Set(api.VersionHeader, version.String(v))
|
||||
httputil.WriteJson(w, resp)
|
||||
}
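// Illustrative only (not part of the diff): a client-side check that the advertised fork
// version header matches the returned slashings. The URL and port are placeholders.
//
//   resp, err := http.Get("http://localhost:3500/eth/v2/beacon/pool/attester_slashings")
//   if err != nil {
//       return err
//   }
//   defer resp.Body.Close()
//
//   forkVersion := resp.Header.Get(api.VersionHeader) // e.g. "electra" or "phase0"
//   _ = forkVersion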
|
||||
|
||||
|
||||
@@ -115,9 +115,16 @@ func TestListAttestations(t *testing.T) {
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
}
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
}
|
||||
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
|
||||
|
||||
@@ -204,10 +211,19 @@ func TestListAttestations(t *testing.T) {
|
||||
t.Run("Pre-Electra", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
}
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.DenebForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
|
||||
t.Run("empty request", func(t *testing.T) {
|
||||
@@ -226,7 +242,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 4, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
})
|
||||
t.Run("slot request", func(t *testing.T) {
|
||||
url := "http://example.com?slot=2"
|
||||
@@ -244,7 +260,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "2", a.Data.Slot)
|
||||
}
|
||||
@@ -265,7 +281,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "4", a.Data.CommitteeIndex)
|
||||
}
|
||||
@@ -286,7 +302,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 1, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "2", a.Data.Slot)
|
||||
assert.Equal(t, "4", a.Data.CommitteeIndex)
|
||||
@@ -370,12 +386,21 @@ func TestListAttestations(t *testing.T) {
|
||||
}
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
}
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4}))
|
||||
// Add one pre-Electra attestation to ensure it is ignored.
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2, att1}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4, att3}))
|
||||
|
||||
t.Run("empty request", func(t *testing.T) {
|
||||
url := "http://example.com"
|
||||
@@ -1658,12 +1683,59 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
})
|
||||
})
|
||||
t.Run("V2", func(t *testing.T) {
|
||||
t.Run("post-electra-ok-1-pre-slashing", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 100
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra, slashing1PreElectra}},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetAttesterSlashingsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, "electra", resp.Version)
|
||||
|
||||
// Unmarshal resp.Data into a slice of slashings
|
||||
var slashings []*structs.AttesterSlashingElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &slashings))
|
||||
|
||||
ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, slashing1PostElectra, ss[0])
|
||||
require.DeepEqual(t, slashing2PostElectra, ss[1])
|
||||
})
|
||||
t.Run("post-electra-ok", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 100
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra}},
|
||||
}
|
||||
|
||||
@@ -1692,9 +1764,11 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
t.Run("pre-electra-ok", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
|
||||
}
|
||||
|
||||
@@ -1722,8 +1796,15 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 100
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{}},
|
||||
}
|
||||
|
||||
|
||||
@@ -10,12 +10,14 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/httputil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
// Blobs is an HTTP handler for Beacon API getBlobs.
|
||||
@@ -59,7 +60,30 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
httputil.WriteJson(w, buildSidecarsJsonResponse(verifiedBlobs))
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not hash block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
data := buildSidecarsJsonResponse(verifiedBlobs)
|
||||
resp := &structs.SidecarsResponse{
|
||||
Version: version.String(blk.Version()),
|
||||
Data: data,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, blkRoot),
|
||||
}
|
||||
httputil.WriteJson(w, resp)
|
||||
}
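// Illustrative only (not part of the diff): decoding the enriched Blobs response on the
// client side. The URL and block identifier are placeholders.
//
//   resp, err := http.Get("http://localhost:3500/eth/v1/beacon/blob_sidecars/head")
//   if err != nil {
//       return err
//   }
//   defer resp.Body.Close()
//
//   var out structs.SidecarsResponse
//   if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
//       return err
//   }
//   // out.Version, out.ExecutionOptimistic and out.Finalized are the new metadata fields;
//   // out.Data still carries the sidecars themselves.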
|
||||
|
||||
// parseIndices filters out invalid and duplicate blob indices
|
||||
@@ -92,14 +116,14 @@ loop:
|
||||
return indices, nil
|
||||
}
|
||||
|
||||
func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) *structs.SidecarsResponse {
|
||||
resp := &structs.SidecarsResponse{Data: make([]*structs.Sidecar, len(verifiedBlobs))}
|
||||
func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) []*structs.Sidecar {
|
||||
sidecars := make([]*structs.Sidecar, len(verifiedBlobs))
|
||||
for i, sc := range verifiedBlobs {
|
||||
proofs := make([]string, len(sc.CommitmentInclusionProof))
|
||||
for j := range sc.CommitmentInclusionProof {
|
||||
proofs[j] = hexutil.Encode(sc.CommitmentInclusionProof[j])
|
||||
}
|
||||
resp.Data[i] = &structs.Sidecar{
|
||||
sidecars[i] = &structs.Sidecar{
|
||||
Index: strconv.FormatUint(sc.Index, 10),
|
||||
Blob: hexutil.Encode(sc.Blob),
|
||||
KzgCommitment: hexutil.Encode(sc.KzgCommitment),
|
||||
@@ -108,7 +132,7 @@ func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) *structs.
|
||||
CommitmentInclusionProof: proofs,
|
||||
}
|
||||
}
|
||||
return resp
|
||||
return sidecars
|
||||
}
|
||||
|
||||
func buildSidecarsSSZResponse(verifiedBlobs []*blocks.VerifiedROBlob) ([]byte, error) {
|
||||
|
||||
@@ -46,16 +46,20 @@ func TestBlobs(t *testing.T) {
|
||||
}
|
||||
blockRoot := blobs[0].BlockRoot()
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
s := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
}
|
||||
|
||||
t.Run("genesis", func(t *testing.T) {
|
||||
u := "http://foo.example/genesis"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
@@ -69,18 +73,14 @@ func TestBlobs(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:]},
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:], Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
@@ -111,118 +111,96 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, hexutil.Encode(blobs[3].Blob), sidecar.Blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[3].KzgCommitment), sidecar.KzgCommitment)
|
||||
assert.Equal(t, hexutil.Encode(blobs[3].KzgProof), sidecar.KzgProof)
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
})
|
||||
t.Run("justified", func(t *testing.T) {
|
||||
u := "http://foo.example/justified"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("root", func(t *testing.T) {
|
||||
u := "http://foo.example/" + hexutil.Encode(blockRoot[:])
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
|
||||
BeaconDB: db,
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
|
||||
BeaconDB: db,
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("one blob only", func(t *testing.T) {
|
||||
u := "http://foo.example/123?indices=2"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
@@ -235,45 +213,47 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, hexutil.Encode(blobs[2].Blob), sidecar.Blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[2].KzgCommitment), sidecar.KzgCommitment)
|
||||
assert.Equal(t, hexutil.Encode(blobs[2].KzgProof), sidecar.KzgProof)
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("no blobs returns an empty array", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t), // new ephemeral storage
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, len(resp.Data), 0)
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("outside retention period returns 200 w/ empty list ", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
moc := &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
moc := &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: moc,
|
||||
GenesisTimeFetcher: moc, // genesis time is set to 0 here, so it results in current epoch being extremely large
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
@@ -281,6 +261,10 @@ func TestBlobs(t *testing.T) {
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 0, len(resp.Data))
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
|
||||
@@ -293,17 +277,14 @@ func TestBlobs(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
@@ -311,16 +292,17 @@ func TestBlobs(t *testing.T) {
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 0, len(resp.Data))
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot before Deneb fork", func(t *testing.T) {
|
||||
u := "http://foo.example/31"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
|
||||
s.Blobs(writer, request)
|
||||
|
||||
@@ -335,11 +317,7 @@ func TestBlobs(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
@@ -354,7 +332,7 @@ func TestBlobs(t *testing.T) {
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
@@ -362,10 +340,8 @@ func TestBlobs(t *testing.T) {
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
require.Equal(t, len(writer.Body.Bytes()), fieldparams.BlobSidecarSize) // size of each sidecar
|
||||
// can directly unmarshal to sidecar since there's only 1
|
||||
@@ -379,7 +355,7 @@ func TestBlobs(t *testing.T) {
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
blocker := &lookup.BeaconDbBlocker{
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
@@ -387,10 +363,8 @@ func TestBlobs(t *testing.T) {
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s := &Server{
|
||||
Blocker: blocker,
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
require.Equal(t, len(writer.Body.Bytes()), fieldparams.BlobSidecarSize*4) // size of each sidecar
|
||||
})
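For the SSZ path exercised above, a rough client-side sketch: the Accept header selects octet-stream, and the body is the requested sidecars SSZ-encoded back to back, so its length is a whole multiple of fieldparams.BlobSidecarSize. The URL and error handling below are illustrative only, not the wiring used by the test.

	req, err := http.NewRequest(http.MethodGet, "http://localhost:3500/eth/v1/beacon/blob_sidecars/123", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Accept", "application/octet-stream")
	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer rsp.Body.Close()
	body, err := io.ReadAll(rsp.Body)
	if err != nil {
		return err
	}
	// Fixed-size sidecars: the count falls directly out of the body length.
	sidecarCount := len(body) / fieldparams.BlobSidecarSize
	_ = sidecarCount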
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
package blob
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Blocker lookup.Blocker
|
||||
Blocker lookup.Blocker
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
}
|
||||
|
||||
@@ -32,6 +32,8 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -12,6 +12,8 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
@@ -76,6 +78,14 @@ var (
|
||||
errWriterUnusable = errors.New("http response writer is unusable")
|
||||
)
|
||||
|
||||
var httpSSEErrorCount = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_sse_error_count",
		Help: "Total HTTP errors for server sent events endpoint",
	},
	[]string{"endpoint", "error"},
)
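A small sketch of how the counter is meant to be bumped by the handlers below: the request path becomes the endpoint label and the error text the error label (the helper name is hypothetical). Operators can then watch for spikes with a query such as rate(http_sse_error_count[5m]).

	func recordSSEError(r *http.Request, err error) {
		if err == nil {
			return
		}
		httpSSEErrorCount.WithLabelValues(r.URL.Path, err.Error()).Inc()
	}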
|
||||
|
||||
// The eventStreamer uses lazyReaders to defer serialization until the moment the value is ready to be written to the client.
|
||||
type lazyReader func() io.Reader
|
||||
|
||||
@@ -145,6 +155,13 @@ func newTopicRequest(topics []string) (*topicRequest, error) {
|
||||
// Servers may send SSE comments beginning with ':' for any purpose,
|
||||
// including to keep the event stream connection alive in the presence of proxy servers.
|
||||
func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
httpSSEErrorCount.WithLabelValues(r.URL.Path, err.Error()).Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
log.Debug("Starting StreamEvents handler")
|
||||
ctx, span := trace.StartSpan(r.Context(), "events.StreamEvents")
|
||||
defer span.End()
|
||||
@@ -174,7 +191,7 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
|
||||
defer cancel()
|
||||
es := newEventStreamer(buffSize, ka)
|
||||
|
||||
go es.outboxWriteLoop(ctx, cancel, sw)
|
||||
go es.outboxWriteLoop(ctx, cancel, sw, r.URL.Path)
|
||||
if err := es.recvEventLoop(ctx, cancel, topics, s); err != nil {
|
||||
log.WithError(err).Debug("Shutting down StreamEvents handler.")
|
||||
}
|
||||
@@ -264,11 +281,12 @@ func newlineReader() io.Reader {
|
||||
|
||||
// outboxWriteLoop runs in a separate goroutine. Its job is to write the values in the outbox to
|
||||
// the client as fast as the client can read them.
|
||||
func (es *eventStreamer) outboxWriteLoop(ctx context.Context, cancel context.CancelFunc, w *streamingResponseWriterController) {
|
||||
func (es *eventStreamer) outboxWriteLoop(ctx context.Context, cancel context.CancelFunc, w *streamingResponseWriterController, endpoint string) {
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Event streamer shutting down due to error.")
|
||||
httpSSEErrorCount.WithLabelValues(endpoint, err.Error()).Inc()
|
||||
}
|
||||
es.exit()
|
||||
}()
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
)
|
||||
|
||||
// Server defines a server implementation of the gRPC events service,
|
||||
// Server defines a server implementation of the http events service,
|
||||
// providing RPC endpoints to subscribe to events from the beacon node.
|
||||
type Server struct {
|
||||
StateNotifier statefeed.Notifier
|
||||
|
||||
@@ -117,13 +117,13 @@ func TestGetPeers(t *testing.T) {
|
||||
|
||||
switch i {
|
||||
case 0, 1:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Connecting)
|
||||
case 2, 3:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(id, peers.Connected)
|
||||
case 4, 5:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnecting)
|
||||
case 6, 7:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnected)
|
||||
default:
|
||||
t.Fatalf("Failed to set connection state for peer")
|
||||
}
|
||||
@@ -289,13 +289,13 @@ func TestGetPeerCount(t *testing.T) {
|
||||
|
||||
switch i {
|
||||
case 0:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Connecting)
|
||||
case 1, 2:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(id, peers.Connected)
|
||||
case 3, 4, 5:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnecting)
|
||||
case 6, 7, 8, 9:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnected)
|
||||
default:
|
||||
t.Fatalf("Failed to set connection state for peer")
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@ func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
// GetAggregateAttestationV2 aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
|
||||
func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestationV2")
|
||||
_, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestationV2")
|
||||
defer span.End()
|
||||
|
||||
_, attDataRoot, ok := shared.HexFromQuery(w, r, "attestation_data_root", fieldparams.RootLength, true)
|
||||
@@ -91,14 +91,15 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
|
||||
return
|
||||
}
|
||||
|
||||
v := slots.ToForkVersion(primitives.Slot(slot))
|
||||
agg := s.aggregatedAttestation(w, primitives.Slot(slot), attDataRoot, primitives.CommitteeIndex(index))
|
||||
if agg == nil {
|
||||
return
|
||||
}
|
||||
resp := &structs.AggregateAttestationResponse{
|
||||
Version: version.String(agg.Version()),
|
||||
Version: version.String(v),
|
||||
}
|
||||
if agg.Version() >= version.Electra {
|
||||
if v >= version.Electra {
|
||||
typedAgg, ok := agg.(*ethpbalpha.AttestationElectra)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Attestation is not of type %T", ðpbalpha.AttestationElectra{}), http.StatusInternalServerError)
|
||||
@@ -123,12 +124,7 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
|
||||
}
|
||||
resp.Data = data
|
||||
}
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
|
||||
w.Header().Set(api.VersionHeader, version.String(v))
|
||||
httputil.WriteJson(w, resp)
|
||||
}
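The handler now derives everything fork-dependent from the requested slot rather than from head state. A minimal sketch of that derivation, assuming (as the hunk suggests) that slots.ToForkVersion returns the runtime/version constant; the helper name is hypothetical and the packages are the ones already imported by this file:

	func consensusVersionForSlot(slot primitives.Slot) (int, string) {
		v := slots.ToForkVersion(slot)
		return v, version.String(v) // the same string is written to the Eth-Consensus-Version header
	}

Because the version comes from the slot, a pre-Electra request is marshalled as structs.Attestation and a post-Electra one as structs.AttestationElectra, and the version header always agrees with the body.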
|
||||
|
||||
@@ -179,27 +175,37 @@ func matchingAtts(atts []ethpbalpha.Att, slot primitives.Slot, attDataRoot []byt
|
||||
return []ethpbalpha.Att{}, nil
|
||||
}
|
||||
|
||||
postElectra := atts[0].Version() >= version.Electra
|
||||
|
||||
postElectra := slots.ToForkVersion(slot) >= version.Electra
|
||||
result := make([]ethpbalpha.Att, 0)
|
||||
for _, att := range atts {
|
||||
if att.GetData().Slot != slot {
|
||||
continue
|
||||
}
|
||||
|
||||
// We ignore the committee index from the request before Electra.
|
||||
// This is because before Electra the committee index is part of the attestation data,
|
||||
// meaning that comparing the data root is sufficient.
|
||||
// Post-Electra the committee index in the data root is always 0, so we need to
|
||||
// compare the committee index separately.
|
||||
if postElectra {
|
||||
ci, err := att.GetCommitteeIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if att.Version() >= version.Electra {
|
||||
ci, err := att.GetCommitteeIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ci != index {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
if ci != index {
|
||||
} else {
|
||||
// If postElectra is false and att.Version >= version.Electra, ignore the attestation.
|
||||
if att.Version() >= version.Electra {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
root, err := att.GetData().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attestation data root")
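Read as a standalone predicate, the acceptance rule above looks roughly like this; a sketch reusing the hunk's own names, with the helper itself being hypothetical:

	func keepAtt(att ethpbalpha.Att, postElectra bool, index primitives.CommitteeIndex) (bool, error) {
		if postElectra {
			if att.Version() < version.Electra {
				return false, nil // pre-Electra attestation offered for a post-Electra slot
			}
			ci, err := att.GetCommitteeIndex()
			if err != nil {
				return false, err
			}
			return ci == index, nil
		}
		// Pre-Electra slot: the committee index already lives in AttestationData,
		// so the caller's data-root comparison covers it; only drop attestations
		// that belong to a later fork.
		return att.Version() < version.Electra, nil
	}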
|
||||
|
||||
@@ -55,15 +55,33 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sig := key.Sign([]byte("sig"))
|
||||
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.Attestation {
|
||||
return ðpbalpha.Attestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, 1, 1, root),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
// It is important to use 0 as the index because that's the only way
|
||||
// pre and post-Electra attestations can both match,
|
||||
// which allows us to properly test that attestations from the
|
||||
// wrong fork are ignored.
|
||||
committeeIndex := uint64(0)
|
||||
|
||||
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.Attestation {
|
||||
return ðpbalpha.Attestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, primitives.CommitteeIndex(committeeIndex), root),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
|
||||
createAttestationElectra := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.AttestationElectra {
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(committeeIndex, true)
|
||||
|
||||
return ðpbalpha.AttestationElectra{
|
||||
CommitteeBits: committeeBits,
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, primitives.CommitteeIndex(committeeIndex), root),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2)
|
||||
@@ -84,7 +102,7 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "1", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
@@ -206,181 +224,288 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
})
|
||||
})
|
||||
t.Run("V2", func(t *testing.T) {
|
||||
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte, bits uint64) *ethpbalpha.AttestationElectra {
|
||||
t.Run("pre-electra", func(t *testing.T) {
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(bits, true)
|
||||
committeeBits.SetBitAt(1, true)
|
||||
|
||||
return ðpbalpha.AttestationElectra{
|
||||
CommitteeBits: committeeBits,
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, 0, 1, root),
|
||||
Signature: sig.Marshal(),
|
||||
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2)
|
||||
aggSlot2 := createAttestation(2, bitfield.Bitlist{0b11100}, root1)
|
||||
unaggSlot3_Root1_1 := createAttestation(3, bitfield.Bitlist{0b11000}, root1)
|
||||
unaggSlot3_Root1_2 := createAttestation(3, bitfield.Bitlist{0b10100}, root1)
|
||||
unaggSlot3_Root2 := createAttestation(3, bitfield.Bitlist{0b11000}, root2)
|
||||
unaggSlot4 := createAttestation(4, bitfield.Bitlist{0b11000}, root1)
|
||||
|
||||
// Add one post-electra attestation to ensure that it is being ignored.
|
||||
		// We choose slot 2, where we have one pre-electra attestation with fewer aggregation bits set.
|
||||
postElectraAtt := createAttestationElectra(2, bitfield.Bitlist{0b11111}, root1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.Attestation,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1, 1)
|
||||
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1, 1)
|
||||
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2, 1)
|
||||
aggSlot2 := createAttestation(2, bitfield.Bitlist{0b11100}, root1, 1)
|
||||
unaggSlot3_Root1_1 := createAttestation(3, bitfield.Bitlist{0b11000}, root1, 1)
|
||||
unaggSlot3_Root1_2 := createAttestation(3, bitfield.Bitlist{0b10100}, root1, 1)
|
||||
unaggSlot3_Root2 := createAttestation(3, bitfield.Bitlist{0b11000}, root2, 1)
|
||||
unaggSlot4 := createAttestation(4, bitfield.Bitlist{0b11000}, root1, 1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.AttestationElectra,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
expectedCommitteeBits string,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, expectedCommitteeBits, attestation.CommitteeBits)
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 4, len(agg), "Expected 4 aggregated attestations")
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &mockChain.ChainService{State: bs},
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot2.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot1_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("1 matching unaggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot4.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=4" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
compareResult(t, attestation, "4", hexutil.Encode(unaggSlot4.AggregationBits), root1, sig.Marshal(), hexutil.Encode(unaggSlot4.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching unaggregated attestations - their aggregate is returned", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot3_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
sig1, err := bls.SignatureFromBytes(unaggSlot3_Root1_1.Signature)
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
sig2, err := bls.SignatureFromBytes(unaggSlot3_Root1_2.Signature)
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2, postElectraAtt}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 5, len(agg), "Expected 5 aggregated attestations, 4 pre electra and 1 post electra")
|
||||
s := &Server{
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal())
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal())
|
||||
})
|
||||
})
|
||||
t.Run("post-electra", func(t *testing.T) {
|
||||
aggSlot1_Root1_1 := createAttestationElectra(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestationElectra(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestationElectra(1, bitfield.Bitlist{0b11100}, root2)
|
||||
aggSlot2 := createAttestationElectra(2, bitfield.Bitlist{0b11100}, root1)
|
||||
unaggSlot3_Root1_1 := createAttestationElectra(3, bitfield.Bitlist{0b11000}, root1)
|
||||
unaggSlot3_Root1_2 := createAttestationElectra(3, bitfield.Bitlist{0b10100}, root1)
|
||||
unaggSlot3_Root2 := createAttestationElectra(3, bitfield.Bitlist{0b11000}, root2)
|
||||
unaggSlot4 := createAttestationElectra(4, bitfield.Bitlist{0b11000}, root1)
|
||||
|
||||
// Add one pre-electra attestation to ensure that it is being ignored.
|
||||
		// We choose slot 2, where we have one post-electra attestation with fewer aggregation bits set.
|
||||
preElectraAtt := createAttestation(2, bitfield.Bitlist{0b11111}, root1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.AttestationElectra,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
expectedCommitteeBits string,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, expectedCommitteeBits, attestation.CommitteeBits)
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
expectedSig := bls.AggregateSignatures([]common.Signature{sig1, sig2})
|
||||
compareResult(t, attestation, "3", hexutil.Encode(bitfield.Bitlist{0b11100}), root1, expectedSig.Marshal(), hexutil.Encode(unaggSlot3_Root1_1.CommitteeBits))
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2, preElectraAtt}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 5, len(agg), "Expected 5 aggregated attestations, 4 electra and 1 pre electra")
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &mockChain.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot2.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot1_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("1 matching unaggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot4.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=4" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
compareResult(t, attestation, "4", hexutil.Encode(unaggSlot4.AggregationBits), root1, sig.Marshal(), hexutil.Encode(unaggSlot4.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching unaggregated attestations - their aggregate is returned", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot3_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
sig1, err := bls.SignatureFromBytes(unaggSlot3_Root1_1.Signature)
|
||||
require.NoError(t, err)
|
||||
sig2, err := bls.SignatureFromBytes(unaggSlot3_Root1_2.Signature)
|
||||
require.NoError(t, err)
|
||||
expectedSig := bls.AggregateSignatures([]common.Signature{sig1, sig2})
|
||||
compareResult(t, attestation, "3", hexutil.Encode(bitfield.Bitlist{0b11100}), root1, expectedSig.Marshal(), hexutil.Encode(unaggSlot3_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("pre-electra attestation is ignored", func(t *testing.T) {
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func createAttestationData(
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
epoch primitives.Epoch,
|
||||
root []byte,
|
||||
) *ethpbalpha.AttestationData {
|
||||
func createAttestationData(slot primitives.Slot, committeeIndex primitives.CommitteeIndex, root []byte) *ethpbalpha.AttestationData {
|
||||
return ðpbalpha.AttestationData{
|
||||
Slot: slot,
|
||||
CommitteeIndex: committeeIndex,
|
||||
BeaconBlockRoot: root,
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: epoch,
|
||||
Epoch: 1,
|
||||
Root: root,
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: epoch,
|
||||
Epoch: 1,
|
||||
Root: root,
|
||||
},
|
||||
}
|
||||
|
||||
beacon-chain/rpc/metrics.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package rpc

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	httpRequestLatency = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_latency_seconds",
			Help:    "Latency of HTTP requests in seconds",
			Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
		},
		[]string{"endpoint", "code", "method"},
	)
	httpRequestCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_request_count",
			Help: "Number of HTTP requests",
		},
		[]string{"endpoint", "code", "method"},
	)
	httpErrorCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_error_count",
			Help: "Total HTTP errors for beacon node requests",
		},
		[]string{"endpoint", "code", "method"},
	)
)
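A sketch of how these three collectors fit together in an HTTP middleware with endpoint, code and method labels. This is an illustration rather than the interceptor the node actually registers, and statusRecorder is a hypothetical helper.

	func instrumented(endpoint string, next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			start := time.Now()
			rec := &statusRecorder{ResponseWriter: w, code: http.StatusOK}
			next.ServeHTTP(rec, r)
			labels := []string{endpoint, strconv.Itoa(rec.code), r.Method}
			httpRequestLatency.WithLabelValues(labels...).Observe(time.Since(start).Seconds())
			httpRequestCount.WithLabelValues(labels...).Inc()
			if rec.code >= http.StatusBadRequest {
				httpErrorCount.WithLabelValues(labels...).Inc()
			}
		})
	}

	// statusRecorder remembers the status code written by the wrapped handler.
	type statusRecorder struct {
		http.ResponseWriter
		code int
	}

	func (s *statusRecorder) WriteHeader(code int) {
		s.code = code
		s.ResponseWriter.WriteHeader(code)
	}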
|
||||
@@ -25,8 +25,8 @@ import (
|
||||
|
||||
type testIdentity enode.ID
|
||||
|
||||
func (_ testIdentity) Verify(_ *enr.Record, _ []byte) error { return nil }
|
||||
func (id testIdentity) NodeAddr(_ *enr.Record) []byte { return id[:] }
|
||||
func (testIdentity) Verify(*enr.Record, []byte) error { return nil }
|
||||
func (id testIdentity) NodeAddr(*enr.Record) []byte { return id[:] }
|
||||
|
||||
func TestListTrustedPeer(t *testing.T) {
|
||||
ids := libp2ptest.GeneratePeerIDs(9)
|
||||
@@ -62,13 +62,13 @@ func TestListTrustedPeer(t *testing.T) {
|
||||
|
||||
switch i {
|
||||
case 0, 1:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Connecting)
|
||||
case 2, 3:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(id, peers.Connected)
|
||||
case 4, 5:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnecting)
|
||||
case 6, 7:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnected)
|
||||
default:
|
||||
t.Fatalf("Failed to set connection state for peer")
|
||||
}
|
||||
|
||||
@@ -303,7 +303,7 @@ func (bs *Server) ListIndexedAttestationsElectra(
|
||||
// that it was included in a block. The attestation may have expired.
|
||||
// Refer to the ethereum consensus specification for more details on how
|
||||
// attestations are processed and when they are no longer valid.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
|
||||
func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolResponse, error) {
|
||||
atts, err := attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
|
||||
if err != nil {
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
|
||||
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
@@ -57,24 +55,6 @@ import (
|
||||
|
||||
const attestationBufferSize = 100
|
||||
|
||||
var (
|
||||
httpRequestLatency = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "http_request_latency_seconds",
|
||||
Help: "Latency of HTTP requests in seconds",
|
||||
Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
|
||||
},
|
||||
[]string{"endpoint", "code", "method"},
|
||||
)
|
||||
httpRequestCount = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "http_request_count",
|
||||
Help: "Number of HTTP requests",
|
||||
},
|
||||
[]string{"endpoint", "code", "method"},
|
||||
)
|
||||
)
|
||||
|
||||
// Service defining an RPC server for a beacon node.
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/checkpoint",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/beacon:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -4,11 +4,14 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
const stateSizeLimit int64 = 1 << 29 // 512MB
|
||||
|
||||
// APIInitializer manages initializing the beacon node using checkpoint sync, retrieving the checkpoint state and root
|
||||
// from the remote beacon node api.
|
||||
type APIInitializer struct {
|
||||
@@ -18,7 +21,7 @@ type APIInitializer struct {
|
||||
// NewAPIInitializer creates an APIInitializer, handling the set up of a beacon node api client
|
||||
// using the provided host string.
|
||||
func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
|
||||
c, err := beacon.NewClient(beaconNodeHost)
|
||||
c, err := beacon.NewClient(beaconNodeHost, client.WithMaxBodySize(stateSizeLimit))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse beacon node url or hostname - %s", beaconNodeHost)
|
||||
}
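For reference, 1 << 29 bytes is 512 MiB, so stateSizeLimit caps how large a downloaded checkpoint state the client will accept. A sketch of the same construction outside this initializer; the host string and the *beacon.Client return type are assumptions of this illustration:

	func newCheckpointClient(host string) (*beacon.Client, error) {
		// 1 << 29 bytes = 512 MiB, matching stateSizeLimit above.
		return beacon.NewClient(host, client.WithMaxBodySize(1<<29))
	}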
|
||||
@@ -32,10 +35,9 @@ func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
|
||||
if err == nil && origin != params.BeaconConfig().ZeroHash {
|
||||
log.Warnf("Origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
|
||||
return nil
|
||||
} else {
|
||||
if !errors.Is(err, db.ErrNotFound) {
|
||||
return errors.Wrap(err, "error while checking database for origin root")
|
||||
}
|
||||
}
|
||||
if err != nil && !errors.Is(err, db.ErrNotFound) {
|
||||
return errors.Wrap(err, "error while checking database for origin root")
|
||||
}
|
||||
od, err := beacon.DownloadFinalizedData(ctx, dl.c)
|
||||
if err != nil {
|
||||
|
||||
@@ -1094,7 +1094,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
|
||||
// its claims with actual blocks.
|
||||
emptyPeer := connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers())
|
||||
defer func() {
|
||||
p2p.Peers().SetConnectionState(emptyPeer, peers.PeerDisconnected)
|
||||
p2p.Peers().SetConnectionState(emptyPeer, peers.Disconnected)
|
||||
}()
|
||||
chainState, err := p2p.Peers().ChainState(emptyPeer)
|
||||
require.NoError(t, err)
|
||||
@@ -1291,7 +1291,7 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
|
||||
// Connect peer that has all the blocks available.
|
||||
allBlocksPeer := connectPeerHavingBlocks(t, p2p, chain, finalizedSlot, p2p.Peers())
|
||||
defer func() {
|
||||
p2p.Peers().SetConnectionState(allBlocksPeer, peers.PeerDisconnected)
|
||||
p2p.Peers().SetConnectionState(allBlocksPeer, peers.Disconnected)
|
||||
}()
|
||||
|
||||
// Queue should be able to fetch whole chain (including slot which comes before the currently set head).
|
||||
|
||||
@@ -227,7 +227,7 @@ func connectPeer(t *testing.T, host *p2pt.TestP2P, datum *peerData, peerStatus *
|
||||
p.Connect(host)
|
||||
|
||||
peerStatus.Add(new(enr.Record), p.PeerID(), nil, network.DirOutbound)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.Connected)
|
||||
peerStatus.SetChainState(p.PeerID(), ðpb.Status{
|
||||
ForkDigest: params.BeaconConfig().GenesisForkVersion,
|
||||
FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)),
|
||||
@@ -326,7 +326,7 @@ func connectPeerHavingBlocks(
|
||||
require.NoError(t, err)
|
||||
|
||||
peerStatus.Add(new(enr.Record), p.PeerID(), nil, network.DirOutbound)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.Connected)
|
||||
peerStatus.SetChainState(p.PeerID(), ðpb.Status{
|
||||
ForkDigest: params.BeaconConfig().GenesisForkVersion,
|
||||
FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
p1.Connect(p2)
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{})
|
||||
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
|
||||
@@ -406,7 +406,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
r.initCaches()
|
||||
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{})
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
@@ -505,7 +505,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
r.initCaches()
|
||||
|
||||
p1.Peers().Add(new(enr.Record), p1.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p1.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p1.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p1.PeerID(), ðpb.Status{})
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
@@ -611,7 +611,7 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
r.initCaches()
|
||||
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{FinalizedEpoch: 2})
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
|
||||
@@ -7,12 +7,13 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
)
|
||||
|
||||
const defaultBurstLimit = 5
|
||||
@@ -98,19 +99,20 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
|
||||
defer l.RUnlock()
|
||||
|
||||
topic := string(stream.Protocol())
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
|
||||
collector, err := l.retrieveCollector(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := stream.Conn().RemotePeer().String()
|
||||
remaining := collector.Remaining(key)
|
||||
|
||||
remaining := collector.Remaining(remotePeer.String())
|
||||
// Treat each request as a minimum of 1.
|
||||
if amt == 0 {
|
||||
amt = 1
|
||||
}
|
||||
if amt > uint64(remaining) {
|
||||
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
|
||||
writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
|
||||
return p2ptypes.ErrRateLimited
|
||||
}
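In isolation, the admission rule above is just this check: every request costs at least one token, and anything beyond the remaining capacity is rejected and scored against the peer. The helper is hypothetical; remaining is what the leaky-bucket collector reports.

	func withinCapacity(remaining int64, amt uint64) bool {
		if amt == 0 {
			amt = 1 // treat each request as a minimum of 1
		}
		return amt <= uint64(remaining)
	}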
|
||||
|
||||
@@ -97,7 +97,7 @@ func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
|
||||
for i := 0; i < defaultBurstLimit; i++ {
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream))
|
||||
}
|
||||
assert.Equal(t, true, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
require.NoError(t, stream.Close(), "could not close stream")
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
|
||||
@@ -51,6 +51,7 @@ func (s *Service) registerRPCHandlers() {
|
||||
s.pingHandler,
|
||||
)
|
||||
s.registerRPCHandlersAltair()
|
||||
|
||||
if currEpoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
s.registerRPCHandlersDeneb()
|
||||
}
|
||||
@@ -138,6 +139,9 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, ttfbTimeout)
|
||||
defer cancel()
|
||||
|
||||
conn := stream.Conn()
|
||||
remotePeer := conn.RemotePeer()
|
||||
|
||||
// Resetting after closing is a no-op so defer a reset in case something goes wrong.
|
||||
// It's up to the handler to Close the stream (send an EOF) if
|
||||
// it successfully writes a response. We don't blindly call
|
||||
@@ -157,12 +161,12 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.rpc")
|
||||
defer span.End()
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
span.SetAttributes(trace.StringAttribute("peer", stream.Conn().RemotePeer().String()))
|
||||
span.SetAttributes(trace.StringAttribute("peer", remotePeer.String()))
|
||||
log := log.WithField("peer", stream.Conn().RemotePeer().String()).WithField("topic", string(stream.Protocol()))
|
||||
|
||||
// Check before hand that peer is valid.
|
||||
if s.cfg.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
|
||||
if err := s.cfg.p2p.Peers().IsBad(remotePeer); err != nil {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, remotePeer); err != nil {
|
||||
log.WithError(err).Debug("Could not disconnect from peer")
|
||||
}
|
||||
return
|
||||
|
||||
@@ -395,7 +395,7 @@ func TestRequestPendingBlobs(t *testing.T) {
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{FinalizedEpoch: 1})
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
|
||||
@@ -55,10 +55,7 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
|
||||
|
||||
// disconnectBadPeer checks whether peer is considered bad by some scorer, and tries to disconnect
|
||||
// the peer, if that is the case. Additionally, disconnection reason is obtained from scorer.
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if !s.cfg.p2p.Peers().IsBad(id) {
|
||||
return
|
||||
}
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID, badPeerErr error) {
|
||||
err := s.cfg.p2p.Peers().Scorers().ValidationError(id)
|
||||
goodbyeCode := p2ptypes.ErrToGoodbyeCode(err)
|
||||
if err == nil {
|
||||
@@ -67,6 +64,8 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
|
||||
log.WithError(err).Debug("Error when disconnecting with bad peer")
|
||||
}
|
||||
|
||||
log.WithError(badPeerErr).WithField("peerID", id).Debug("Initiate peer disconnection")
|
||||
}
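A hypothetical call site tying the pieces together: IsBad now reports why a peer is considered bad, and that reason is what the new log line above attaches to the disconnection (the identifiers are the ones already in scope in this file).

	if badPeerErr := s.cfg.p2p.Peers().IsBad(id); badPeerErr != nil {
		s.disconnectBadPeer(ctx, id, badPeerErr)
	}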
|
||||
|
||||
// A custom goodbye method that is used by our connection handler, in the
|
||||
|
||||
@@ -21,97 +21,148 @@ import (
func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
SetRPCStreamDeadlines(stream)

// Validate the incoming request regarding rate limiting.
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
return errors.Wrap(err, "validate request")
}

s.rateLimiter.add(stream, 1)

if s.cfg.p2p.Metadata() == nil || s.cfg.p2p.Metadata().IsNil() {
// Retrieve our metadata.
metadata := s.cfg.p2p.Metadata()

// Handle the case where our metadata is nil.
if metadata == nil || metadata.IsNil() {
nilErr := errors.New("nil metadata stored for host")

resp, err := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
if err != nil {
log.WithError(err).Debug("Could not generate a response error")
} else if _, err := stream.Write(resp); err != nil {
return nilErr
}

if _, err := stream.Write(resp); err != nil {
log.WithError(err).Debug("Could not write to stream")
}

return nilErr
}

// Get the stream version from the protocol.
_, _, streamVersion, err := p2p.TopicDeconstructor(string(stream.Protocol()))
if err != nil {
wrappedErr := errors.Wrap(err, "topic deconstructor")

resp, genErr := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
if genErr != nil {
log.WithError(genErr).Debug("Could not generate a response error")
} else if _, wErr := stream.Write(resp); wErr != nil {
return wrappedErr
}

if _, wErr := stream.Write(resp); wErr != nil {
log.WithError(wErr).Debug("Could not write to stream")
}
return err
return wrappedErr
}
currMd := s.cfg.p2p.Metadata()

// Handle the case where the stream version is not recognized.
metadataVersion := metadata.Version()
switch streamVersion {
case p2p.SchemaVersionV1:
// We have a v1 metadata object saved locally, so we
// convert it back to a v0 metadata object.
if currMd.Version() != version.Phase0 {
currMd = wrapper.WrappedMetadataV0(
switch metadataVersion {
case version.Altair, version.Deneb:
metadata = wrapper.WrappedMetadataV0(
&pb.MetaDataV0{
Attnets: currMd.AttnetsBitfield(),
SeqNumber: currMd.SequenceNumber(),
Attnets: metadata.AttnetsBitfield(),
SeqNumber: metadata.SequenceNumber(),
})
}

case p2p.SchemaVersionV2:
// We have a v0 metadata object saved locally, so we
// convert it to a v1 metadata object.
if currMd.Version() != version.Altair {
currMd = wrapper.WrappedMetadataV1(
switch metadataVersion {
case version.Phase0:
metadata = wrapper.WrappedMetadataV1(
&pb.MetaDataV1{
Attnets: currMd.AttnetsBitfield(),
SeqNumber: currMd.SequenceNumber(),
Attnets: metadata.AttnetsBitfield(),
SeqNumber: metadata.SequenceNumber(),
Syncnets: bitfield.Bitvector4{byte(0x00)},
})
case version.Deneb:
metadata = wrapper.WrappedMetadataV1(
&pb.MetaDataV1{
Attnets: metadata.AttnetsBitfield(),
SeqNumber: metadata.SequenceNumber(),
Syncnets: metadata.SyncnetsBitfield(),
})
}
}

// Write the METADATA response into the stream.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
return errors.Wrap(err, "write metadata response")
}
_, err = s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, currMd)

// Encode the metadata and write it to the stream.
_, err = s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, metadata)
if err != nil {
return err
return errors.Wrap(err, "encode metadata")
}

closeStream(stream, log)
return nil
}

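The switch above is easiest to read as a compatibility shim: a peer dialing the v1 METADATA protocol expects a MetaDataV0 payload, a peer dialing v2 expects MetaDataV1, so the locally stored object is converted on the fly. Below is a condensed sketch of that mapping, not the handler's code verbatim, assuming the sync package's imports (p2p, wrapper, pb, version, bitfield, metadata) are in scope; Syncnets is zeroed when upgrading Phase0 metadata because the original object carries no sync-committee bitfield.

    // Sketch only: picks the wire shape of the METADATA response from the
    // stream's schema version, converting the local object when needed.
    func responseMetadata(streamVersion string, local metadata.Metadata) metadata.Metadata {
    	switch streamVersion {
    	case p2p.SchemaVersionV1:
    		// A /metadata/1 request expects MetaDataV0: drop the syncnets field.
    		if local.Version() != version.Phase0 {
    			return wrapper.WrappedMetadataV0(&pb.MetaDataV0{
    				Attnets:   local.AttnetsBitfield(),
    				SeqNumber: local.SequenceNumber(),
    			})
    		}
    	case p2p.SchemaVersionV2:
    		// A /metadata/2 request expects MetaDataV1: add a syncnets field,
    		// empty when the local metadata predates Altair.
    		if local.Version() == version.Phase0 {
    			return wrapper.WrappedMetadataV1(&pb.MetaDataV1{
    				Attnets:   local.AttnetsBitfield(),
    				SeqNumber: local.SequenceNumber(),
    				Syncnets:  bitfield.Bitvector4{byte(0x00)},
    			})
    		}
    	}
    	return local
    }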
func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata.Metadata, error) {
// sendMetaDataRequest sends a METADATA request to the peer and returns the response.
func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (metadata.Metadata, error) {
ctx, cancel := context.WithTimeout(ctx, respTimeout)
defer cancel()

topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
// Compute the current epoch.
currentSlot := s.cfg.clock.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)

// Compute the topic for the metadata request regarding the current epoch.
topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, currentEpoch)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "topic from message")
}
stream, err := s.cfg.p2p.Send(ctx, new(interface{}), topic, id)

// Send the METADATA request to the peer.
message := new(interface{})
stream, err := s.cfg.p2p.Send(ctx, message, topic, peerID)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "send metadata request")
}

defer closeStream(stream, log)

// Read the METADATA response from the peer.
code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, err
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.Wrap(err, "read status code")
}

if code != 0 {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.New(errMsg)
}

// Get the genesis validators root.
valRoot := s.cfg.clock.GenesisValidatorsRoot()
rpcCtx, err := forks.ForkDigestFromEpoch(slots.ToEpoch(s.cfg.clock.CurrentSlot()), valRoot[:])

// Get the fork digest from the current epoch and the genesis validators root.
rpcCtx, err := forks.ForkDigestFromEpoch(currentEpoch, valRoot[:])
if err != nil {
return nil, err
return nil, errors.Wrap(err, "fork digest from epoch")
}

// Instantiate zero value of the metadata.
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "extract data type from type map")
}

// Defensive check to ensure valid objects are being sent.
topicVersion := ""
switch msg.Version() {
@@ -120,12 +171,17 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
case version.Altair:
topicVersion = p2p.SchemaVersionV2
}

// Validate the version of the topic.
if err := validateVersion(topicVersion, stream); err != nil {
return nil, err
}

// Decode the metadata from the peer.
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, err
}

return msg, nil
}

@@ -25,7 +25,7 @@ import (
"github.com/sirupsen/logrus"
)

// maintainPeerStatuses by infrequently polling peers for their latest status.
// maintainPeerStatuses maintains peer statuses by polling peers for their latest status twice per epoch.
func (s *Service) maintainPeerStatuses() {
// Run twice per epoch.
interval := time.Duration(params.BeaconConfig().SlotsPerEpoch.Div(2).Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
@@ -38,16 +38,20 @@ func (s *Service) maintainPeerStatuses() {
// If our peer status has not been updated correctly we disconnect over here
// and set the connection state over here instead.
if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected {
s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnecting)
s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnecting)
if err := s.cfg.p2p.Disconnect(id); err != nil {
log.WithError(err).Debug("Error when disconnecting with peer")
}
s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnected)
log.WithFields(logrus.Fields{
"peer": id,
"reason": "maintain peer statuses - peer is not connected",
}).Debug("Initiate peer disconnection")
return
}
// Disconnect from peers that are considered bad by any of the registered scorers.
if s.cfg.p2p.Peers().IsBad(id) {
s.disconnectBadPeer(s.ctx, id)
if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
s.disconnectBadPeer(s.ctx, id, err)
return
}
// If the status hasn't been updated in the recent interval time.
@@ -73,6 +77,11 @@ func (s *Service) maintainPeerStatuses() {
if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeTooManyPeers, id); err != nil {
log.WithField("peer", id).WithError(err).Debug("Could not disconnect with peer")
}

log.WithFields(logrus.Fields{
"peer": id,
"reason": "to be pruned",
}).Debug("Initiate peer disconnection")
}
})
}
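For a concrete sense of the "twice per epoch" interval computed above: with mainnet-style parameters (32 slots per epoch, 12 seconds per slot; these numbers are assumptions of the example, the code reads them from params.BeaconConfig()), the poll runs every 32 / 2 × 12 s = 192 s. A tiny runnable sketch of the same arithmetic:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Assumed mainnet values; the real code reads them from params.BeaconConfig().
    	const slotsPerEpoch = 32
    	const secondsPerSlot = 12

    	// Poll twice per epoch, mirroring SlotsPerEpoch.Div(2).Mul(SecondsPerSlot).
    	interval := time.Duration(slotsPerEpoch/2*secondsPerSlot) * time.Second
    	fmt.Println(interval) // 3m12s
    }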
@@ -169,8 +178,8 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
// If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
err = s.validateStatusMessage(ctx, msg)
s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
if s.cfg.p2p.Peers().IsBad(id) {
s.disconnectBadPeer(s.ctx, id)
if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
s.disconnectBadPeer(s.ctx, id, err)
}
return err
}
@@ -182,7 +191,7 @@ func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
}
// Do not return an error for ping requests.
if err := s.sendPingRequest(ctx, id); err != nil && !isUnwantedError(err) {
log.WithError(err).Debug("Could not ping peer")
log.WithError(err).WithField("pid", id).Debug("Could not ping peer")
}
return nil
}

@@ -413,7 +413,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
assert.Equal(t, numActive1+1, numActive2, "Number of active peers unexpected")

require.NoError(t, p2.Disconnect(p1.PeerID()))
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerDisconnected)
p1.Peers().SetConnectionState(p2.PeerID(), peers.Disconnected)

// Wait for disconnect event to trigger.
time.Sleep(200 * time.Millisecond)
@@ -877,7 +877,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))

assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
assert.NoError(t, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
p1.Connect(p2)

if util.WaitTimeout(&wg, time.Second) {
@@ -887,9 +887,9 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

connectionState, err := p1.Peers().ConnectionState(p2.PeerID())
require.NoError(t, err, "Could not obtain peer connection state")
assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")
assert.Equal(t, peers.Disconnected, connectionState, "Expected peer to be disconnected")

assert.Equal(t, true, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
assert.NotNil(t, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
}

func TestStatusRPC_ValidGenesisMessage(t *testing.T) {

@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
@@ -54,52 +55,32 @@ func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Messag

// Register PubSub subscribers
func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.subscribe(
p2p.BlockSubnetTopicFormat,
s.validateBeaconBlockPubSub,
s.beaconBlockSubscriber,
digest,
)
s.subscribe(
p2p.AggregateAndProofSubnetTopicFormat,
s.validateAggregateAndProof,
s.beaconAggregateProofSubscriber,
digest,
)
s.subscribe(
p2p.ExitSubnetTopicFormat,
s.validateVoluntaryExit,
s.voluntaryExitSubscriber,
digest,
)
s.subscribe(
p2p.ProposerSlashingSubnetTopicFormat,
s.validateProposerSlashing,
s.proposerSlashingSubscriber,
digest,
)
s.subscribe(
p2p.AttesterSlashingSubnetTopicFormat,
s.validateAttesterSlashing,
s.attesterSlashingSubscriber,
digest,
)
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, digest)
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, digest)
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, digest)
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, digest)
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, digest)

if flags.Get().SubscribeToAllSubnets {
s.subscribeStaticWithSubnets(
p2p.AttestationSubnetTopicFormat,
s.validateCommitteeIndexBeaconAttestation, /* validator */
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
s.validateCommitteeIndexBeaconAttestation,
s.committeeIndexBeaconAttestationSubscriber,
digest,
"Attestation",
params.BeaconConfig().AttestationSubnetCount,
)
} else {
s.subscribeDynamicWithSubnets(
p2p.AttestationSubnetTopicFormat,
s.validateCommitteeIndexBeaconAttestation, /* validator */
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
s.validateCommitteeIndexBeaconAttestation,
s.committeeIndexBeaconAttestationSubscriber,
digest,
params.BeaconConfig().MaxCommitteesPerSlot,
s.subscribeToAttestationsSubnetsDynamic,
)
}

// Altair Fork Version
if epoch >= params.BeaconConfig().AltairForkEpoch {
s.subscribe(
@@ -108,19 +89,24 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.syncContributionAndProofSubscriber,
digest,
)

if flags.Get().SubscribeToAllSubnets {
s.subscribeStaticWithSyncSubnets(
s.subscribeStaticWithSubnets(
p2p.SyncCommitteeSubnetTopicFormat,
s.validateSyncCommitteeMessage, /* validator */
s.syncCommitteeMessageSubscriber, /* message handler */
s.validateSyncCommitteeMessage,
s.syncCommitteeMessageSubscriber,
digest,
"Sync committee",
params.BeaconConfig().SyncCommitteeSubnetCount,
)
} else {
s.subscribeDynamicWithSyncSubnets(
s.subscribeDynamicWithSubnets(
p2p.SyncCommitteeSubnetTopicFormat,
s.validateSyncCommitteeMessage, /* validator */
s.syncCommitteeMessageSubscriber, /* message handler */
s.validateSyncCommitteeMessage,
s.syncCommitteeMessageSubscriber,
digest,
params.BeaconConfig().SyncCommitteeSubnetCount,
s.subscribeToSyncSubnetsDynamic,
)
}
}
@@ -142,6 +128,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.validateBlob, /* validator */
s.blobSubscriber, /* message handler */
digest,
"Blob sidecar",
params.BeaconConfig().BlobsidecarSubnetCount,
)
}
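The five one-line s.subscribe(...) calls above could also be written table-driven; the sketch below is only an illustrative alternative to show the shape of the refactor, not what the PR does, and it reuses the validator and subscriber names visible in this hunk.

    // Illustrative only: a table-driven form of the same registrations.
    type gossipSub struct {
    	topic     string
    	validator wrappedVal
    	handler   subHandler
    }

    func (s *Service) registerBaseSubscribers(digest [4]byte) {
    	for _, sub := range []gossipSub{
    		{p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber},
    		{p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber},
    		{p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber},
    		{p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber},
    		{p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber},
    	} {
    		s.subscribe(sub.topic, sub.validator, sub.handler, digest)
    	}
    }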
@@ -171,7 +158,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
// Do not resubscribe already seen subscriptions.
ok := s.subHandler.topicExists(topic)
if ok {
log.Debugf("Provided topic already has an active subscription running: %s", topic)
log.WithField("topic", topic).Debug("Provided topic already has an active subscription running")
return nil
}

@@ -188,6 +175,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
log.WithError(err).Error("Could not subscribe topic")
return nil
}

s.subHandler.addTopic(sub.Topic(), sub)

// Pipeline decodes the incoming subscription data, runs the validation, and handles the
@@ -195,6 +183,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
pipeline := func(msg *pubsub.Message) {
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
defer cancel()

ctx, span := trace.StartSpan(ctx, "sync.pubsub")
defer span.End()

@@ -312,8 +301,8 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
"multiaddress": multiAddr(pid, s.cfg.p2p.Peers()),
"peerID": pid.String(),
"agent": agentString(pid, s.cfg.p2p.Host()),
"gossipScore": s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid),
}).Debugf("Gossip message was ignored")
"gossipScore": fmt.Sprintf("%.2f", s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)),
}).Debug("Gossip message was ignored")
}
messageIgnoredValidationCounter.WithLabelValues(topic).Inc()
}
@@ -323,137 +312,292 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p

// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte, subnetCount uint64) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
func (s *Service) subscribeStaticWithSubnets(
topic string,
validator wrappedVal,
handle subHandler,
digest [4]byte,
humanDescription string,
subnetCount uint64,
) {
// Retrieve the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

// Retrieve the epoch of the fork corresponding to the digest.
_, epoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot[:])
if err != nil {
// Impossible condition as it would mean digest does not exist.
panic(err)
}
base := p2p.GossipTopicMappings(topic, e)

// Retrieve the base protobuf message.
base := p2p.GossipTopicMappings(topic, epoch)
if base == nil {
// Impossible condition as it would mean topic does not exist.
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
}
for i := uint64(0); i < subnetCount; i++ {
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)

// Define a ticker ticking every slot.
genesisTime := s.cfg.clock.GenesisTime()
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
ticker := slots.NewSlotTicker(genesisTime, secondsPerSlot)

minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet

// Subscribe to all subnets.
for i := range subnetCount {
fullTopic := s.addDigestAndIndexToTopic(topic, digest, i)
s.subscribeWithBase(fullTopic, validator, handle)
}
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case <-ticker.C():
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)

valid, err := isDigestValid(digest, genesisTime, genesisValidatorsRoot)
if err != nil {
log.Error(err)
continue
}

if !valid {
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
message := fmt.Sprintf("%s subnets with this digest are no longer valid, unsubscribing from all of them.", humanDescription)
log.WithField("digest", fmt.Sprintf("%#x", digest)).Warning(message)

// Unsubscribes from all our current subnets.
for i := uint64(0); i < subnetCount; i++ {
for i := range subnetCount {
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.p2p.Encoding().ProtocolSuffix()
s.unSubscribeFromTopic(fullTopic)
}

ticker.Done()
return
}
// Check every slot that there are enough peers
for i := uint64(0); i < subnetCount; i++ {
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", i)
_, err := s.cfg.p2p.FindPeersWithSubnet(
s.ctx,
s.addDigestAndIndexToTopic(topic, digest, i),
i,
flags.Get().MinimumPeersPerSubnet,
)
if err != nil {

// Check that all subnets have enough peers.
for i := range subnetCount {
fullTopic := s.addDigestAndIndexToTopic(topic, digest, i)

if !s.enoughPeersAreConnected(fullTopic) {
if _, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, fullTopic, i, minimumPeersPerSubnet); err != nil {
log.WithError(err).Debug("Could not search for peers")
return
}
}
}
case <-s.ctx.Done():
ticker.Done()
return
}
}
}()
}

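The loops above rely on Go 1.22's range-over-integer form: for i := range subnetCount iterates i = 0 … subnetCount-1, exactly like the counted loop it replaces. A minimal, self-contained illustration:

    package main

    import "fmt"

    func main() {
    	var subnetCount uint64 = 4

    	// Go 1.22+: ranging over an integer yields 0, 1, ..., subnetCount-1,
    	// with the loop variable taking the integer's type (uint64 here).
    	for i := range subnetCount {
    		fmt.Println(i)
    	}

    	// Equivalent pre-1.22 form, as used before this refactor.
    	for i := uint64(0); i < subnetCount; i++ {
    		fmt.Println(i)
    	}
    }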
// subscribe to a dynamically changing list of subnets. This method expects a fmt compatible
// string for the topic name and the list of subnets for subscribed topics that should be
// maintained.
type specificSubscribeFunc func(
topic string,
digest [4]byte,
genesisValidatorsRoot [fieldparams.RootLength]byte,
genesisTime time.Time,
subscriptions map[uint64]*pubsub.Subscription,
currentSlot primitives.Slot,
validate wrappedVal,
handle subHandler,
) bool

// subscribeDynamicWithSubnets subscribes to a dynamically changing list of subnets.
func (s *Service) subscribeDynamicWithSubnets(
topicFormat string,
validate wrappedVal,
handle subHandler,
digest [4]byte,
subnetCount uint64,
specificSubscribe specificSubscribeFunc,
) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
// Initialize the subscriptions map.
subscriptions := make(map[uint64]*pubsub.Subscription, subnetCount)

// Retrieve the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

// Retrieve the epoch of the fork corresponding to the digest.
_, epoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot[:])
if err != nil {
// Impossible condition as it would mean digest does not exist.
panic(err)
}
base := p2p.GossipTopicMappings(topicFormat, e)

// Retrieve the base protobuf message.
base := p2p.GossipTopicMappings(topicFormat, epoch)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
}
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

// Retrieve the genesis time.
genesisTime := s.cfg.clock.GenesisTime()

// Define a ticker ticking every slot.
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
ticker := slots.NewSlotTicker(genesisTime, secondsPerSlot)

// Retrieve the current slot.
currentSlot := s.cfg.clock.CurrentSlot()

go func() {
// Subscribe to the sync subnets.
specificSubscribe(topicFormat, digest, genesisValidatorsRoot, genesisTime, subscriptions, currentSlot, validate, handle)

for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case currentSlot := <-ticker.C():
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)
if err != nil {
log.Error(err)
continue
}
if !valid {
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
// Unsubscribes from all our current subnets.
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
isDigestValid := specificSubscribe(topicFormat, digest, genesisValidatorsRoot, genesisTime, subscriptions, currentSlot, validate, handle)

// Stop the ticker if the digest is not valid. Likely to happen after a hard fork.
if !isDigestValid {
ticker.Done()
return
}
wantedSubs := s.retrievePersistentSubs(currentSlot)
// Resize as appropriate.
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)

// subscribe desired aggregator subnets.
for _, idx := range wantedSubs {
s.subscribeAggregatorSubnet(subscriptions, idx, digest, validate, handle)
}
// find desired subs for attesters
attesterSubs := s.attesterSubnetIndices(currentSlot)
for _, idx := range attesterSubs {
s.lookupAttesterSubnets(digest, idx)
}
case <-s.ctx.Done():
ticker.Done()
return
}
}
}()
}

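The specificSubscribeFunc type above is the heart of the factorization: one generic driver (subscribeDynamicWithSubnets) owns the slot ticker and the subscription map, while the fork-family-specific work is injected as a callback that also reports whether the digest is still valid. A self-contained toy sketch of that pattern with generic names (not Prysm APIs), shown only to clarify the control flow:

    package main

    import (
    	"fmt"
    	"time"
    )

    // tickFunc mirrors the role of specificSubscribeFunc: it is invoked on every
    // tick and reports whether the driver should keep running.
    type tickFunc func(slot uint64) bool

    // runDynamic mirrors the shape of subscribeDynamicWithSubnets: one generic
    // driver, behaviour injected via the callback.
    func runDynamic(ticks <-chan uint64, stop <-chan struct{}, fn tickFunc) {
    	for {
    		select {
    		case <-stop:
    			return
    		case slot := <-ticks:
    			if !fn(slot) { // e.g. digest no longer valid after a fork
    				return
    			}
    		}
    	}
    }

    func main() {
    	ticks := make(chan uint64)
    	stop := make(chan struct{})
    	go func() {
    		for slot := uint64(0); slot < 3; slot++ {
    			ticks <- slot
    			time.Sleep(10 * time.Millisecond)
    		}
    		close(stop)
    	}()
    	runDynamic(ticks, stop, func(slot uint64) bool {
    		fmt.Println("resubscribe for slot", slot)
    		return true
    	})
    }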
// revalidate that our currently connected subnets are valid.
func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subscription,
wantedSubs []uint64, topicFormat string, digest [4]byte) {
func (s *Service) subscribeToAttestationsSubnetsDynamic(
topicFormat string,
digest [4]byte,
genesisValidatorsRoot [fieldparams.RootLength]byte,
genesisTime time.Time,
subscriptions map[uint64]*pubsub.Subscription,
currentSlot primitives.Slot,
validate wrappedVal,
handle subHandler,
) bool {
// Do not subscribe if not synced.
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
return true
}

// Do not subscribe if the digest is not valid.
valid, err := isDigestValid(digest, genesisTime, genesisValidatorsRoot)
if err != nil {
log.Error(err)
return true
}

// Unsubscribe from all subnets if the digest is not valid. It's likely to be the case after a hard fork.
if !valid {
const message = "Attestation subnets with this digest are no longer valid, unsubscribing from all of them."
log.WithField("digest", fmt.Sprintf("%#x", digest)).Warn(message)
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
return false
}

wantedSubnetsIndex := s.retrievePersistentSubs(currentSlot)
s.reValidateSubscriptions(subscriptions, wantedSubnetsIndex, topicFormat, digest)

for _, index := range wantedSubnetsIndex {
s.subscribeAggregatorSubnet(subscriptions, index, digest, validate, handle)
}

// Find desired subnets for attesters.
attesterSubnets := s.attesterSubnetIndices(currentSlot)
for _, index := range attesterSubnets {
s.lookupAttesterSubnets(digest, index)
}

return true
}

func (s *Service) subscribeToSyncSubnetsDynamic(
topicFormat string,
digest [4]byte,
genesisValidatorsRoot [fieldparams.RootLength]byte,
genesisTime time.Time,
subscriptions map[uint64]*pubsub.Subscription,
currentSlot primitives.Slot,
validate wrappedVal,
handle subHandler,
) bool {
// Get sync subnets topic.
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]

// Do not subscribe if not synced.
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
return true
}

// Do not subscribe if the digest is not valid.
valid, err := isDigestValid(digest, genesisTime, genesisValidatorsRoot)
if err != nil {
log.Error(err)
return true
}

// Unsubscribe from all subnets if the digest is not valid. It's likely to be the case after a hard fork.
if !valid {
const message = "Sync subnets with this digest are no longer valid, unsubscribing from all of them."
log.WithField("digest", fmt.Sprintf("%#x", digest)).Warn(message)
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
return false
}

// Get the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)

// Retrieve the subnets we want to subscribe to.
wantedSubnetsIndex := s.retrieveActiveSyncSubnets(currentEpoch)

// Remove subscriptions that are no longer wanted.
s.reValidateSubscriptions(subscriptions, wantedSubnetsIndex, topicFormat, digest)

// Subscribe to wanted subnets.
for _, subnetIndex := range wantedSubnetsIndex {
subnetTopic := fmt.Sprintf(topic, digest, subnetIndex)

// Check if subscription exists.
if _, exists := subscriptions[subnetIndex]; exists {
continue
}

// We need to subscribe to the subnet.
subscription := s.subscribeWithBase(subnetTopic, validate, handle)
subscriptions[subnetIndex] = subscription
}

// Find new peers for wanted subnets if needed.
for _, subnetIndex := range wantedSubnetsIndex {
subnetTopic := fmt.Sprintf(topic, digest, subnetIndex)

// Check if we have enough peers in the subnet. Skip if we do.
if s.enoughPeersAreConnected(subnetTopic) {
continue
}

// Not enough peers in the subnet, we need to search for more.
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, subnetIndex, flags.Get().MinimumPeersPerSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
}
}

return true
}

// reValidateSubscriptions unsubscribes from topics we are currently subscribed to but that are
// not in the list of wanted subnets.
// TODO: Rename this function as it does not only revalidate subscriptions.
func (s *Service) reValidateSubscriptions(
subscriptions map[uint64]*pubsub.Subscription,
wantedSubs []uint64,
topicFormat string,
digest [4]byte,
) {
for k, v := range subscriptions {
var wanted bool
for _, idx := range wantedSubs {
@@ -462,6 +606,7 @@ func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subsc
break
}
}

if !wanted && v != nil {
v.Cancel()
fullTopic := fmt.Sprintf(topicFormat, digest, k) + s.cfg.p2p.Encoding().ProtocolSuffix()
@@ -487,9 +632,7 @@ func (s *Service) subscribeAggregatorSubnet(
if _, exists := subscriptions[idx]; !exists {
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
}
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
if !s.enoughPeersAreConnected(subnetTopic) {
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
@@ -497,162 +640,11 @@ func (s *Service) subscribeAggregatorSubnet(
}
}

// subscribe missing subnets for our sync committee members.
func (s *Service) subscribeSyncSubnet(
subscriptions map[uint64]*pubsub.Subscription,
idx uint64,
digest [4]byte,
validate wrappedVal,
handle subHandler,
) {
// do not subscribe if we have no peers in the same
// subnet
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]
subnetTopic := fmt.Sprintf(topic, digest, idx)
// check if subscription exists and if not subscribe the relevant subnet.
if _, exists := subscriptions[idx]; !exists {
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
}
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to sync gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
}
}
}

// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
panic(err)
}
base := p2p.GossipTopicMappings(topic, e)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
}
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
}
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case <-ticker.C():
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)
if err != nil {
log.Error(err)
continue
}
if !valid {
log.Warnf("Sync subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
// Unsubscribes from all our current subnets.
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.p2p.Encoding().ProtocolSuffix()
s.unSubscribeFromTopic(fullTopic)
}
ticker.Done()
return
}
// Check every slot that there are enough peers
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
log.Debugf("No peers found subscribed to sync gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", i)
_, err := s.cfg.p2p.FindPeersWithSubnet(
s.ctx,
s.addDigestAndIndexToTopic(topic, digest, i),
i,
flags.Get().MinimumPeersPerSubnet,
)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
return
}
}
}
}
}
}()
}

// subscribe to a dynamically changing list of subnets. This method expects a fmt compatible
// string for the topic name and the list of subnets for subscribed topics that should be
// maintained.
func (s *Service) subscribeDynamicWithSyncSubnets(
topicFormat string,
validate wrappedVal,
handle subHandler,
digest [4]byte,
) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
panic(err)
}
base := p2p.GossipTopicMappings(topicFormat, e)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
}
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().SyncCommitteeSubnetCount)
genesis := s.cfg.clock.GenesisTime()
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

go func() {
for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case currentSlot := <-ticker.C():
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)
if err != nil {
log.Error(err)
continue
}
if !valid {
log.Warnf("Sync subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
// Unsubscribes from all our current subnets.
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
ticker.Done()
return
}

wantedSubs := s.retrieveActiveSyncSubnets(slots.ToEpoch(currentSlot))
// Resize as appropriate.
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)

// subscribe desired aggregator subnets.
for _, idx := range wantedSubs {
s.subscribeSyncSubnet(subscriptions, idx, digest, validate, handle)
}
}
}
}()
}

// lookup peers for attester specific subnets.
func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.Attestation{})]
subnetTopic := fmt.Sprintf(topic, digest, idx)
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
if !s.enoughPeersAreConnected(subnetTopic) {
// perform a search for peers with the desired committee index.
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
if err != nil {
@@ -676,10 +668,15 @@ func (s *Service) unSubscribeFromTopic(topic string) {
}
}

// find if we have peers who are subscribed to the same subnet
func (s *Service) validPeersExist(subnetTopic string) bool {
numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
// enoughPeersAreConnected checks if we have enough peers which are subscribed to the same subnet.
func (s *Service) enoughPeersAreConnected(subnetTopic string) bool {
topic := subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix()
threshold := flags.Get().MinimumPeersPerSubnet

peersWithSubnet := s.cfg.p2p.PubSub().ListPeers(topic)
peersWithSubnetCount := len(peersWithSubnet)

return peersWithSubnetCount >= threshold
}

func (s *Service) retrievePersistentSubs(currSlot primitives.Slot) []uint64 {

@@ -335,7 +335,7 @@ func TestStaticSubnets(t *testing.T) {
r.subscribeStaticWithSubnets(defaultTopic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
// no-op
return nil
}, d, params.BeaconConfig().AttestationSubnetCount)
}, d, "Attestation", params.BeaconConfig().AttestationSubnetCount)
topics := r.cfg.p2p.PubSub().GetTopics()
if uint64(len(topics)) != params.BeaconConfig().AttestationSubnetCount {
t.Errorf("Wanted the number of subnet topics registered to be %d but got %d", params.BeaconConfig().AttestationSubnetCount, len(topics))
@@ -496,6 +496,7 @@ func TestFilterSubnetPeers(t *testing.T) {
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}

// Empty cache at the end of the test.
defer cache.SubnetIDs.EmptyAllCaches()
digest, err := r.currentForkDigest()
@@ -511,8 +512,7 @@ func TestFilterSubnetPeers(t *testing.T) {
p2 := createPeer(t, subnet10, subnet20)
p3 := createPeer(t)

// Connect to all
// peers.
// Connect to all peers.
p.Connect(p1)
p.Connect(p2)
p.Connect(p3)
@@ -565,7 +565,7 @@ func TestSubscribeWithSyncSubnets_StaticOK(t *testing.T) {
defer cache.SyncSubnetIDs.EmptyAllCaches()
digest, err := r.currentForkDigest()
assert.NoError(t, err)
r.subscribeStaticWithSyncSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest)
r.subscribeStaticWithSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest, "Sync committee", params.BeaconConfig().SyncCommitteeSubnetCount)
assert.Equal(t, int(params.BeaconConfig().SyncCommitteeSubnetCount), len(r.cfg.p2p.PubSub().GetTopics()))
cancel()
}
@@ -600,7 +600,7 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second)
digest, err := r.currentForkDigest()
assert.NoError(t, err)
r.subscribeDynamicWithSyncSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest)
r.subscribeDynamicWithSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest, params.BeaconConfig().SyncCommitteeSubnetCount, r.subscribeToSyncSubnetsDynamic)
time.Sleep(2 * time.Second)
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
topicMap := map[string]bool{}
@@ -645,7 +645,7 @@ func TestSubscribeWithSyncSubnets_StaticSwitchFork(t *testing.T) {
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
assert.NoError(t, err)
r.subscribeStaticWithSyncSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest)
r.subscribeStaticWithSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest, "Sync committee", params.BeaconConfig().SyncCommitteeSubnetCount)
assert.Equal(t, int(params.BeaconConfig().SyncCommitteeSubnetCount), len(r.cfg.p2p.PubSub().GetTopics()))

// Expect that all old topics will be unsubscribed.
@@ -689,7 +689,7 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
assert.NoError(t, err)

r.subscribeDynamicWithSyncSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest)
r.subscribeDynamicWithSubnets(p2p.SyncCommitteeSubnetTopicFormat, nil, nil, digest, params.BeaconConfig().SyncCommitteeSubnetCount, r.subscribeToSyncSubnetsDynamic)
time.Sleep(2 * time.Second)
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
topicMap := map[string]bool{}

@@ -211,16 +211,11 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms

// Log the arrival time of the accepted block
graffiti := blk.Block().Body().Graffiti()
exec, err := blk.Block().Body().Execution()
if err != nil {
log.WithError(err)
}
startTime, err := slots.ToTime(genesisTime, blk.Block().Slot())
logFields := logrus.Fields{
"blockSlot": blk.Block().Slot(),
"proposerIndex": blk.Block().ProposerIndex(),
"graffiti": string(graffiti[:]),
"extraData": string(exec.ExtraData()),
}
if err != nil {
log.WithError(err).WithFields(logFields).Warn("Received block, could not report timing information.")

@@ -67,9 +67,11 @@ var InitsyncBlobSidecarRequirements = requirementList(GossipBlobSidecarRequireme
RequireSidecarProposerExpected,
)

// ELMemPoolRequirements is a list of verification requirements to be used when importing blobs and proof from
// execution layer mempool. Only the KZG proof verification is required.
var ELMemPoolRequirements = []Requirement{RequireSidecarKzgProofVerified}
// ELMemPoolRequirements defines the verification requirements for importing blobs and proofs
// from the execution layer's mempool. Currently, no requirements are enforced because it is
// assumed that blobs and proofs from the execution layer are correctly formatted,
// given the trusted relationship between the consensus layer and execution layer.
var ELMemPoolRequirements []Requirement

// BackfillBlobSidecarRequirements is the same as InitsyncBlobSidecarRequirements.
var BackfillBlobSidecarRequirements = requirementList(InitsyncBlobSidecarRequirements).excluding()

@@ -42,11 +42,13 @@ var downloadCmd = &cli.Command{
},
}

const stateSizeLimit int64 = 1 << 29 // 512MB to accommodate future state growth

func cliActionDownload(_ *cli.Context) error {
ctx := context.Background()
f := downloadFlags

opts := []client.ClientOpt{client.WithTimeout(f.Timeout)}
opts := []client.ClientOpt{client.WithTimeout(f.Timeout), client.WithMaxBodySize(stateSizeLimit)}
client, err := beacon.NewClient(downloadFlags.BeaconNodeHost, opts...)
if err != nil {
return err

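For reference, 1 << 29 bytes is 536,870,912 bytes, i.e. exactly 512 MiB, which is the body size the new WithMaxBodySize option allows. A trivial check of that arithmetic:

    package main

    import "fmt"

    func main() {
    	const stateSizeLimit int64 = 1 << 29
    	fmt.Println(stateSizeLimit)             // 536870912 bytes
    	fmt.Println(stateSizeLimit / (1 << 20)) // 512 MiB
    }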
@@ -8,6 +8,9 @@ import (
"google.golang.org/protobuf/proto"
)

// MetadataV0
// ----------

// MetadataV0 is a convenience wrapper around our metadata protobuf object.
type MetadataV0 struct {
md *pb.MetaDataV0
@@ -28,6 +31,11 @@ func (m MetadataV0) AttnetsBitfield() bitfield.Bitvector64 {
return m.md.Attnets
}

// SyncnetsBitfield returns the bitfield stored in the metadata.
func (m MetadataV0) SyncnetsBitfield() bitfield.Bitvector4 {
return bitfield.Bitvector4{0}
}

// InnerObject returns the underlying metadata protobuf structure.
func (m MetadataV0) InnerObject() interface{} {
return m.md
@@ -74,16 +82,19 @@ func (m MetadataV0) MetadataObjV0() *pb.MetaDataV0 {

// MetadataObjV1 returns the inner metadata object in its type
// specified form. If it doesn't exist then we return nothing.
func (_ MetadataV0) MetadataObjV1() *pb.MetaDataV1 {
func (MetadataV0) MetadataObjV1() *pb.MetaDataV1 {
return nil
}

// Version returns the fork version of the underlying object.
func (_ MetadataV0) Version() int {
func (MetadataV0) Version() int {
return version.Phase0
}

// MetadataV1 is a convenience wrapper around our metadata v2 protobuf object.
// MetadataV1
// ----------

// MetadataV1 is a convenience wrapper around our metadata v1 protobuf object.
type MetadataV1 struct {
md *pb.MetaDataV1
}
@@ -103,6 +114,11 @@ func (m MetadataV1) AttnetsBitfield() bitfield.Bitvector64 {
return m.md.Attnets
}

// SyncnetsBitfield returns the bitfield stored in the metadata.
func (m MetadataV1) SyncnetsBitfield() bitfield.Bitvector4 {
return m.md.Syncnets
}

// InnerObject returns the underlying metadata protobuf structure.
func (m MetadataV1) InnerObject() interface{} {
return m.md
@@ -143,7 +159,7 @@ func (m MetadataV1) UnmarshalSSZ(buf []byte) error {

// MetadataObjV0 returns the inner metadata object in its type
// specified form. If it doesn't exist then we return nothing.
func (_ MetadataV1) MetadataObjV0() *pb.MetaDataV0 {
func (MetadataV1) MetadataObjV0() *pb.MetaDataV0 {
return nil
}

@@ -154,6 +170,6 @@ func (m MetadataV1) MetadataObjV1() *pb.MetaDataV1 {
}

// Version returns the fork version of the underlying object.
func (_ MetadataV1) Version() int {
func (MetadataV1) Version() int {
return version.Altair
}

@@ -91,7 +91,7 @@ service BeaconChain {
// that it was included in a block. The attestation may have expired.
// Refer to the Ethereum Beacon Chain specification for more details on how
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
rpc AttestationPool(AttestationPoolRequest) returns (AttestationPoolResponse) {
option (google.api.http) = {
get: "/eth/v1alpha1/beacon/attestations/pool"
@@ -106,7 +106,7 @@ service BeaconChain {
// that it was included in a block. The attestation may have expired.
// Refer to the Ethereum Beacon Chain specification for more details on how
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
rpc AttestationPoolElectra(AttestationPoolRequest) returns (AttestationPoolElectraResponse) {
option (google.api.http) = {
get: "/eth/v1alpha1/beacon/attestations/pool_electra"

@@ -10,6 +10,7 @@ import (
type Metadata interface {
SequenceNumber() uint64
AttnetsBitfield() bitfield.Bitvector64
SyncnetsBitfield() bitfield.Bitvector4
InnerObject() interface{}
IsNil() bool
Copy() Metadata

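Adding SyncnetsBitfield to the interface lets callers read sync-committee subnet participation without type-switching on the concrete wrapper; MetadataV0 simply reports an empty Bitvector4. A hedged sketch of such a caller, assuming the metadata and go-bitfield packages shown in this diff are in scope; activeSyncnets is a hypothetical helper, not part of the PR:

    // Sketch only: works for both MetadataV0 and MetadataV1 thanks to the new
    // SyncnetsBitfield method; V0 returns bitfield.Bitvector4{0}.
    func activeSyncnets(md metadata.Metadata) []uint64 {
    	var active []uint64
    	syncnets := md.SyncnetsBitfield()
    	for i := uint64(0); i < syncnets.Len(); i++ {
    		if syncnets.BitAt(i) {
    			active = append(active, i)
    		}
    	}
    	return active
    }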
@@ -1,6 +1,8 @@
package version

import "github.com/pkg/errors"
import (
"github.com/pkg/errors"
)

const (
Phase0 = iota

@@ -13,6 +13,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -32,6 +33,7 @@ go_test(
deps = [
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time:go_default_library",

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
mathutil "github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/sirupsen/logrus"
)
@@ -81,6 +82,25 @@ func ToEpoch(slot primitives.Slot) primitives.Epoch {
return primitives.Epoch(slot.DivSlot(params.BeaconConfig().SlotsPerEpoch))
}

// ToForkVersion translates a slot into its corresponding version.
func ToForkVersion(slot primitives.Slot) int {
epoch := ToEpoch(slot)
switch {
case epoch >= params.BeaconConfig().ElectraForkEpoch:
return version.Electra
case epoch >= params.BeaconConfig().DenebForkEpoch:
return version.Deneb
case epoch >= params.BeaconConfig().CapellaForkEpoch:
return version.Capella
case epoch >= params.BeaconConfig().BellatrixForkEpoch:
return version.Bellatrix
case epoch >= params.BeaconConfig().AltairForkEpoch:
return version.Altair
default:
return version.Phase0
}
}

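A small usage sketch of ToForkVersion with made-up fork epochs; the real values come from params.BeaconConfig(), and the numbers below are illustrative only (assume SlotsPerEpoch = 32, AltairForkEpoch = 10, DenebForkEpoch = 20, with the later forks set beyond epoch 20):

    // Illustrative only: hypothetical fork epochs, not mainnet values.
    _ = ToForkVersion(319) // epoch 9  -> version.Phase0 (before the assumed Altair epoch 10)
    _ = ToForkVersion(320) // epoch 10 -> version.Altair
    _ = ToForkVersion(640) // epoch 20 -> version.Deneb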
// EpochStart returns the first slot number of the
// current epoch.
//

@@ -7,6 +7,7 @@ import (

"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -632,3 +633,58 @@ func TestSecondsUntilNextEpochStart(t *testing.T) {
require.Equal(t, true, IsEpochStart(currentSlot))

}

func TestToForkVersion(t *testing.T) {
t.Run("Electra fork version", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 100
params.OverrideBeaconConfig(config)

slot, err := EpochStart(params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)

result := ToForkVersion(slot)
require.Equal(t, version.Electra, result)
})

t.Run("Deneb fork version", func(t *testing.T) {
slot, err := EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)

result := ToForkVersion(slot)
require.Equal(t, version.Deneb, result)
})

t.Run("Capella fork version", func(t *testing.T) {
slot, err := EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)

result := ToForkVersion(slot)
require.Equal(t, version.Capella, result)
})

t.Run("Bellatrix fork version", func(t *testing.T) {
slot, err := EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)

result := ToForkVersion(slot)
require.Equal(t, version.Bellatrix, result)
})

t.Run("Altair fork version", func(t *testing.T) {
slot, err := EpochStart(params.BeaconConfig().AltairForkEpoch)
require.NoError(t, err)

result := ToForkVersion(slot)
require.Equal(t, version.Altair, result)
})

t.Run("Phase0 fork version", func(t *testing.T) {
slot, err := EpochStart(params.BeaconConfig().AltairForkEpoch)
require.NoError(t, err)

result := ToForkVersion(slot - 1)
require.Equal(t, version.Phase0, result)
})
}
