Peer status peer scorer (#7480)

* define and enforce minimum scorer interface

* better decoupling of multiple scorers in service

* removes redundant weight

* adds peer_status scorer

* minor re-arrangement

* rely on scorer in peer status service

* gazelle

* updates rpc_status

* fix build

* better interface verifying

* remove unnecessary locks

* mark todo

* simplify service

* remove redundant references

* avoid passing contexts

* remove unused context

* refactor errors to p2p package

* refactor goodbye codes into p2p

* simplify status api

* remove isbad method from peers

* update scoring service

* introduce validation error

* gazelle

* add score

* restore isbad method

* resolve dep cycle

* gazelle

* peer status scorer: test score calculation

* bad responses scorer: bad peer score

* remove redundant type checks

* pass nil config

* add rounding

* test IsBadPeer

* test bad peers list

* more tests

* check validation error on non-existent peer

* max peer slot -> highest peer slot

* remove redundant comment

* combine

* combine

* introduce var

* fix tests

* remove redundant update

* minor fix

* Nishant's suggestion

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Authored by Victor Farazdagi on 2020-11-18 18:51:42 +03:00, committed by GitHub
parent 59d63087b1
commit 095c4d5dd5
10 changed files with 423 additions and 44 deletions


@@ -48,10 +48,11 @@ type PeerData struct {
Enr *enr.Record
NextValidTime time.Time
// Chain related data.
- ChainState *pb.Status
- MetaData *pb.MetaData
- ChainStateLastUpdated time.Time
- // Scorers related data.
+ MetaData *pb.MetaData
+ ChainState *pb.Status
+ ChainStateLastUpdated time.Time
+ ChainStateValidationError error
+ // Scorers internal data.
BadResponses int
ProcessedBlocks uint64
BlockProviderUpdated time.Time


@@ -6,6 +6,7 @@ go_library(
srcs = [
"bad_responses.go",
"block_providers.go",
"peer_status.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers",
@@ -13,6 +14,8 @@ go_library(
deps = [
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/rand:go_default_library",
"//shared/timeutils:go_default_library",
@@ -25,6 +28,7 @@ go_test(
srcs = [
"bad_responses_test.go",
"block_providers_test.go",
"peer_status_test.go",
"scorers_test.go",
"service_test.go",
],
@@ -33,6 +37,8 @@ go_test(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/rand:go_default_library",
"//shared/testutil/assert:go_default_library",


@@ -58,6 +58,9 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
// score is a lock-free version of Score.
func (s *BadResponsesScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
peerData, ok := s.store.PeerData(pid)
if !ok {


@@ -0,0 +1,143 @@
package scorers
import (
"errors"
"math"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
var _ Scorer = (*PeerStatusScorer)(nil)
// PeerStatusScorer represents scorer that evaluates peers based on their statuses.
// Peer statuses are updated by regularly polling peers (see sync/rpc_status.go).
type PeerStatusScorer struct {
config *PeerStatusScorerConfig
store *peerdata.Store
ourHeadSlot uint64
highestPeerHeadSlot uint64
}
// PeerStatusScorerConfig holds configuration parameters for peer status scoring service.
type PeerStatusScorerConfig struct{}
// newPeerStatusScorer creates new peer status scoring service.
func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig) *PeerStatusScorer {
if config == nil {
config = &PeerStatusScorerConfig{}
}
return &PeerStatusScorer{
config: config,
store: store,
}
}
// Score returns calculated peer score.
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.score(pid)
}
// score is a lock-free version of Score.
func (s *PeerStatusScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
peerData, ok := s.store.PeerData(pid)
if !ok || peerData.ChainState == nil {
return score
}
if peerData.ChainState.HeadSlot < s.ourHeadSlot {
return score
}
// Calculate score as a ratio to the known maximum head slot.
// The closer the current peer's head slot to the maximum, the higher is the calculated score.
if s.highestPeerHeadSlot > 0 {
score = float64(peerData.ChainState.HeadSlot) / float64(s.highestPeerHeadSlot)
return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}
return score
}
// IsBadPeer states if the peer is to be considered bad.
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeer(pid)
}
// isBadPeer is lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
}
// Mark peer as bad, if the latest error is one of the terminal ones.
terminalErrs := []error{
p2ptypes.ErrWrongForkDigestVersion,
}
for _, err := range terminalErrs {
if errors.Is(peerData.ChainStateValidationError, err) {
return true
}
}
return false
}
// BadPeers returns the peers that are considered bad.
func (s *PeerStatusScorer) BadPeers() []peer.ID {
s.store.RLock()
defer s.store.RUnlock()
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
return badPeers
}
// SetPeerStatus sets chain state data for a given peer.
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
s.store.Lock()
defer s.store.Unlock()
peerData := s.store.PeerDataGetOrCreate(pid)
peerData.ChainState = chainState
peerData.ChainStateLastUpdated = timeutils.Now()
peerData.ChainStateValidationError = validationError
// Update maximum known head slot (scores will be calculated with respect to that maximum value).
if chainState != nil && chainState.HeadSlot > s.highestPeerHeadSlot {
s.highestPeerHeadSlot = chainState.HeadSlot
}
}
// PeerStatus gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatus(pid)
}
// peerStatus lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.ChainState, nil
}
return nil, peerdata.ErrPeerUnknown
}
// SetHeadSlot updates known head slot.
func (s *PeerStatusScorer) SetHeadSlot(slot uint64) {
s.ourHeadSlot = slot
}
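
To make the head-slot ratio in PeerStatusScorer.score concrete, here is a small standalone sketch of the same arithmetic (plain Go, not using the Prysm packages; the lowercase constants are stand-ins for ScoreRoundingFactor and BadPeerScore). It reproduces the "partial score" case exercised in the tests below: our head at slot 128, the peer at 192, and the highest known peer head at 256.

```go
package main

import (
	"fmt"
	"math"
)

// Stand-ins for scorers.ScoreRoundingFactor and scorers.BadPeerScore.
const (
	scoreRoundingFactor = 10000
	badPeerScore        = -1.00
)

// peerStatusScore mirrors the shape of PeerStatusScorer.score: bad peers get
// badPeerScore, peers behind our head (or with no known highest slot) get 0,
// everyone else is scored as a ratio to the highest known peer head slot,
// rounded to four decimal places.
func peerStatusScore(peerHeadSlot, ourHeadSlot, highestPeerHeadSlot uint64, isBad bool) float64 {
	if isBad {
		return badPeerScore
	}
	if peerHeadSlot < ourHeadSlot || highestPeerHeadSlot == 0 {
		return 0
	}
	score := float64(peerHeadSlot) / float64(highestPeerHeadSlot)
	return math.Round(score*scoreRoundingFactor) / scoreRoundingFactor
}

func main() {
	fmt.Println(peerStatusScore(192, 128, 256, false)) // 0.75
	fmt.Println(peerStatusScore(192, 128, 256, true))  // -1 (terminal validation error)
}
```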


@@ -0,0 +1,197 @@
package scorers_test
import (
"context"
"testing"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestScorers_PeerStatus_Score(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tests := []struct {
name string
update func(scorer *scorers.PeerStatusScorer)
check func(scorer *scorers.PeerStatusScorer)
}{
{
name: "nonexistent peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(64)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent bad peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, p2ptypes.ErrWrongForkDigestVersion)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, scorers.BadPeerScore, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer no head slot for the host node is known",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 1.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer head is before ours",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(128)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer partial score",
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := uint64(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
// Set another peer to a higher score.
scorer.SetPeerStatus("peer2", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 128,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
headSlot := uint64(128)
assert.Equal(t, float64(headSlot+64)/float64(headSlot+128), scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer full score",
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := uint64(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 1.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer no max known slot",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 0,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().PeerStatusScorer()
if tt.update != nil {
tt.update(scorer)
}
tt.check(scorer)
})
}
}
func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
pid := peer.ID("peer1")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
func TestScorers_PeerStatus_BadPeers(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
pid1 := peer.ID("peer1")
pid2 := peer.ID("peer2")
pid3 := peer.ID("peer3")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
assert.Equal(t, (*pb.Status)(nil), status)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
HeadSlot: 128,
}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
HeadSlot: 128,
}, p2ptypes.ErrInvalidEpoch)
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.NoError(t, err)
assert.Equal(t, uint64(128), status.HeadSlot)
assert.Equal(t, nil, peerStatuses.Scorers().ValidationError("peer1"))
assert.ErrorContains(t, p2ptypes.ErrInvalidEpoch.Error(), peerStatuses.Scorers().ValidationError("peer2"))
assert.Equal(t, nil, peerStatuses.Scorers().ValidationError("peer3"))
}


@@ -15,6 +15,9 @@ var _ Scorer = (*Service)(nil)
// This parameter is used in math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor.
const ScoreRoundingFactor = 10000
// BadPeerScore defines score that is returned for a bad peer (all other metrics are ignored).
const BadPeerScore = -1.00
// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
Score(pid peer.ID) float64
@@ -28,6 +31,7 @@ type Service struct {
scorers struct {
badResponsesScorer *BadResponsesScorer
blockProviderScorer *BlockProviderScorer
peerStatusScorer *PeerStatusScorer
}
weights map[Scorer]float64
totalWeight float64
@@ -37,6 +41,7 @@ type Service struct {
type Config struct {
BadResponsesScorerConfig *BadResponsesScorerConfig
BlockProviderScorerConfig *BlockProviderScorerConfig
PeerStatusScorerConfig *PeerStatusScorerConfig
}
// NewService provides fully initialized peer scoring service.
@@ -51,6 +56,8 @@ func NewService(ctx context.Context, store *peerdata.Store, config *Config) *Ser
s.setScorerWeight(s.scorers.badResponsesScorer, 1.0)
s.scorers.blockProviderScorer = newBlockProviderScorer(store, config.BlockProviderScorerConfig)
s.setScorerWeight(s.scorers.blockProviderScorer, 1.0)
s.scorers.peerStatusScorer = newPeerStatusScorer(store, config.PeerStatusScorerConfig)
s.setScorerWeight(s.scorers.peerStatusScorer, 0.0)
// Start background tasks.
go s.loop(ctx)
@@ -68,6 +75,11 @@ func (s *Service) BlockProviderScorer() *BlockProviderScorer {
return s.scorers.blockProviderScorer
}
// PeerStatusScorer exposes peer chain status scoring service.
func (s *Service) PeerStatusScorer() *PeerStatusScorer {
return s.scorers.peerStatusScorer
}
// ActiveScorersCount returns number of scorers that can affect score (have non-zero weight).
func (s *Service) ActiveScorersCount() int {
cnt := 0
@@ -90,6 +102,7 @@ func (s *Service) Score(pid peer.ID) float64 {
}
score += s.scorers.badResponsesScorer.score(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
score += s.scorers.blockProviderScorer.score(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
score += s.scorers.peerStatusScorer.score(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}
@@ -102,7 +115,13 @@ func (s *Service) IsBadPeer(pid peer.ID) bool {
// isBadPeer is a lock-free version of isBadPeer.
func (s *Service) isBadPeer(pid peer.ID) bool {
return s.scorers.badResponsesScorer.isBadPeer(pid)
if s.scorers.badResponsesScorer.isBadPeer(pid) {
return true
}
if s.scorers.peerStatusScorer.isBadPeer(pid) {
return true
}
return false
}
// BadPeers returns the peers that are considered bad by any of registered scorers.
@@ -119,6 +138,19 @@ func (s *Service) BadPeers() []peer.ID {
return badPeers
}
// ValidationError returns peer data validation error, which potentially provides more information
// why peer is considered bad.
func (s *Service) ValidationError(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
peerData, ok := s.store.PeerData(pid)
if !ok {
return nil
}
return peerData.ChainStateValidationError
}
// loop handles background tasks.
func (s *Service) loop(ctx context.Context) {
decayBadResponsesStats := time.NewTicker(s.scorers.badResponsesScorer.Params().DecayInterval)
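
As a companion to the Service.Score hunk above, a hedged sketch of how the weighted aggregation behaves (standalone Go, not the actual scorers package; the weights mirror those registered in NewService, where the new peer status scorer starts at weight 0.0 and therefore does not yet move the aggregate score).

```go
package main

import (
	"fmt"
	"math"
)

const scoreRoundingFactor = 10000 // stand-in for scorers.ScoreRoundingFactor

// weightedScore models one scorer's contribution: raw score times registered weight.
type weightedScore struct {
	score  float64
	weight float64
}

// aggregate mirrors the shape of Service.Score: sum the weighted scores, then
// round to four decimal places.
func aggregate(parts []weightedScore) float64 {
	total := 0.0
	for _, p := range parts {
		total += p.score * p.weight
	}
	return math.Round(total*scoreRoundingFactor) / scoreRoundingFactor
}

func main() {
	parts := []weightedScore{
		{score: -0.2, weight: 1.0}, // bad responses scorer
		{score: 0.05, weight: 1.0}, // block provider scorer
		{score: 0.75, weight: 0.0}, // peer status scorer (weight 0.0 for now)
	}
	fmt.Println(aggregate(parts)) // -0.15
}
```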


@@ -156,25 +156,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
// SetChainState sets the chain state of the given remote peer.
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
- p.store.Lock()
- defer p.store.Unlock()
- peerData := p.store.PeerDataGetOrCreate(pid)
- peerData.ChainState = chainState
- peerData.ChainStateLastUpdated = timeutils.Now()
+ p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
}
// ChainState gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
- p.store.RLock()
- defer p.store.RUnlock()
- if peerData, ok := p.store.PeerData(pid); ok {
- return peerData.ChainState, nil
- }
- return nil, peerdata.ErrPeerUnknown
+ return p.scorers.PeerStatusScorer().PeerStatus(pid)
}
// IsActive checks if a peers is active and returns the result appropriately.
@@ -277,7 +266,7 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
return timeutils.Now(), peerdata.ErrPeerUnknown
}
- // IsBad states if the peer is to be considered bad.
+ // IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
return p.scorers.IsBadPeer(pid)


@@ -55,6 +55,18 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
return s.p2p.Disconnect(stream.Conn().RemotePeer())
}
// disconnectBadPeer checks whether peer is considered bad by some scorer, and tries to disconnect
// the peer, if that is the case. Additionally, disconnection reason is obtained from scorer.
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
if !s.p2p.Peers().IsBad(id) {
return
}
goodbyeCode := types.ErrToGoodbyeCode(s.p2p.Peers().Scorers().ValidationError(id))
if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
log.Debugf("Error when disconnecting with bad peer: %v", err)
}
}
// A custom goodbye method that is used by our connection handler, in the
// event we receive bad peers.
func (s *Service) sendGoodbye(ctx context.Context, id peer.ID) error {
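
disconnectBadPeer relies on types.ErrToGoodbyeCode to turn the stored validation error into a goodbye code, and that mapping is not shown in this diff. The standalone sketch below only illustrates the presumed idea, using stand-in error values and codes (the real ones live in beacon-chain/p2p/types): a wrong fork digest maps to the wrong-network code, anything else falls back to a generic error code.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for p2ptypes.ErrWrongForkDigestVersion.
var errWrongForkDigestVersion = errors.New("wrong fork digest version")

// Stand-ins for the goodbye codes; values assumed from the eth2 goodbye reason list.
const (
	goodbyeCodeWrongNetwork uint64 = 2
	goodbyeCodeGenericError uint64 = 3
)

// errToGoodbyeCode sketches what ErrToGoodbyeCode presumably does for the errors
// touched by this PR; the real implementation may cover more cases.
func errToGoodbyeCode(err error) uint64 {
	if errors.Is(err, errWrongForkDigestVersion) {
		return goodbyeCodeWrongNetwork
	}
	return goodbyeCodeGenericError
}

func main() {
	fmt.Println(errToGoodbyeCode(errWrongForkDigestVersion))   // 2
	fmt.Println(errToGoodbyeCode(errors.New("invalid epoch"))) // 3
}
```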


@@ -42,10 +42,9 @@ func (s *Service) maintainPeerStatuses() {
s.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
return
}
+ // Disconnect from peers that are considered bad by any of the registered scorers.
if s.p2p.Peers().IsBad(id) {
- if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeGenericError, id); err != nil {
- log.Debugf("Error when disconnecting with bad peer: %v", err)
- }
+ s.disconnectBadPeer(s.ctx, id)
return
}
// If the status hasn't been updated in the recent interval time.
@@ -65,8 +64,8 @@ func (s *Service) maintainPeerStatuses() {
})
}
- // resyncIfBehind checks periodically to see if we are in normal sync but have fallen behind our peers by more than an epoch,
- // in which case we attempt a resync using the initial sync method to catch up.
+ // resyncIfBehind checks periodically to see if we are in normal sync but have fallen behind our peers
+ // by more than an epoch, in which case we attempt a resync using the initial sync method to catch up.
func (s *Service) resyncIfBehind() {
millisecondsPerEpoch := params.BeaconConfig().SecondsPerSlot * params.BeaconConfig().SlotsPerEpoch * 1000
// Run sixteen times per epoch.
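
A quick worked example of that cadence, assuming mainnet parameters (12 seconds per slot, 32 slots per epoch); the exact interval expression is outside this hunk, so this only illustrates the "sixteen times per epoch" comment above.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mainnet: SecondsPerSlot = 12, SlotsPerEpoch = 32.
	millisecondsPerEpoch := 12 * 32 * 1000
	// Sixteen checks per epoch -> one check every 24 seconds.
	interval := time.Duration(millisecondsPerEpoch/16) * time.Millisecond
	fmt.Println(interval) // 24s
}
```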
@@ -143,7 +142,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
}
if code != 0 {
- s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
+ s.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
return errors.New(errMsg)
}
@@ -151,22 +150,18 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
if err := s.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
return err
}
- s.p2p.Peers().SetChainState(stream.Conn().RemotePeer(), msg)
+ // If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
err = s.validateStatusMessage(ctx, msg)
- if err != nil {
- s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
- // Disconnect if on a wrong fork.
- if errors.Is(err, p2ptypes.ErrWrongForkDigestVersion) {
- if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeWrongNetwork, stream.Conn().RemotePeer()); err != nil {
- return err
- }
- }
+ s.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
+ if s.p2p.Peers().IsBad(id) {
+ s.disconnectBadPeer(s.ctx, id)
}
return err
}
func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
+ s.p2p.Peers().Scorers().PeerStatusScorer().SetHeadSlot(s.chain.HeadSlot())
if err := s.sendRPCStatusRequest(ctx, id); err != nil {
return err
}
@@ -198,10 +193,12 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
}
s.rateLimiter.add(stream, 1)
+ remotePeer := stream.Conn().RemotePeer()
if err := s.validateStatusMessage(ctx, m); err != nil {
log.WithFields(logrus.Fields{
- "peer": stream.Conn().RemotePeer(),
- "error": err}).Debug("Invalid status message from peer")
+ "peer": remotePeer,
+ "error": err,
+ }).Debug("Invalid status message from peer")
respCode := byte(0)
switch err {
@@ -209,20 +206,20 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
respCode = responseCodeServerError
case p2ptypes.ErrWrongForkDigestVersion:
// Respond with our status and disconnect with the peer.
- s.p2p.Peers().SetChainState(stream.Conn().RemotePeer(), m)
+ s.p2p.Peers().SetChainState(remotePeer, m)
if err := s.respondWithStatus(ctx, stream); err != nil {
return err
}
if err := stream.Close(); err != nil { // Close before disconnecting.
log.WithError(err).Debug("Failed to close stream")
}
- if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeWrongNetwork, stream.Conn().RemotePeer()); err != nil {
+ if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeWrongNetwork, remotePeer); err != nil {
return err
}
return nil
default:
respCode = responseCodeInvalidRequest
- s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
+ s.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
}
originalErr := err
@@ -236,12 +233,12 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
if err := stream.Close(); err != nil { // Close before disconnecting.
log.WithError(err).Debug("Failed to close stream")
}
- if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeGenericError, stream.Conn().RemotePeer()); err != nil {
+ if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeGenericError, remotePeer); err != nil {
return err
}
return originalErr
}
- s.p2p.Peers().SetChainState(stream.Conn().RemotePeer(), m)
+ s.p2p.Peers().SetChainState(remotePeer, m)
return s.respondWithStatus(ctx, stream)
}


@@ -764,6 +764,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
assert.NoError(t, err)
})
assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
p1.Connect(p2)
if testutil.WaitTimeout(&wg, time.Second) {
@@ -775,9 +776,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
require.NoError(t, err, "Failed to obtain peer connection state")
assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")
- badResponses, err := p1.Peers().Scorers().BadResponsesScorer().Count(p2.PeerID())
- require.NoError(t, err, "Failed to obtain peer connection state")
- assert.Equal(t, 1, badResponses, "Bad response was not bumped to one")
+ assert.Equal(t, true, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
}
func TestStatusRPC_ValidGenesisMessage(t *testing.T) {