Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: peerDAS-20...refactor-r (9 commits):
- fd37e6fe13
- 1707cf3ec7
- bdbb850250
- b28b1ed6ce
- 74bb0821a8
- 8025a483e2
- 0475631543
- f27092fa91
- 67cef41cbf
```diff
@@ -27,6 +27,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
 - Added validator index label to `validator_statuses` metric.
 - Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
 - PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
+- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
+- P2P: Add logs when a peer is (dis)connected. Add the reason of the disconnection when we initiate it.
+- Added a Prometheus error counter metric for HTTP requests to track beacon node requests.
 
 ### Changed
 
@@ -61,6 +64,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
 - Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
 - Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
 - Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
+- Fixed pending deposits processing on Electra.
+- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
+- Improvements to HTTP response handling. [pr](https://github.com/prysmaticlabs/prysm/pull/14673)
 
 ### Deprecated
 
@@ -71,6 +77,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
 - Removed finalized validator index cache, no longer needed.
 - Removed validator queue position log on key reload and wait for activation.
 - Removed outdated spectest exclusions for EIP-6110.
+- Removed kzg proof check from blob reconstructor.
 
 ### Fixed
 
```
```diff
@@ -12,6 +12,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//api:go_default_library",
+        "//api/client:go_default_library",
         "//api/server/structs:go_default_library",
         "//config/fieldparams:go_default_library",
         "//consensus-types:go_default_library",
@@ -14,6 +14,7 @@ import (
 
     "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v5/api"
+    "github.com/prysmaticlabs/prysm/v5/api/client"
     "github.com/prysmaticlabs/prysm/v5/api/server/structs"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -176,7 +177,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
         err = non200Err(r)
         return
     }
-    res, err = io.ReadAll(r.Body)
+    res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
     if err != nil {
         err = errors.Wrap(err, "error reading http response body from builder server")
         return
@@ -358,7 +359,7 @@ func (c *Client) Status(ctx context.Context) error {
 }
 
 func non200Err(response *http.Response) error {
-    bodyBytes, err := io.ReadAll(response.Body)
+    bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
     var errMessage ErrorMessage
     var body string
     if err != nil {
```
```diff
@@ -10,11 +10,17 @@ import (
     "github.com/pkg/errors"
 )
 
+const (
+    MaxBodySize    int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
+    MaxErrBodySize int64 = 1 << 17 // 128KB
+)
+
 // Client is a wrapper object around the HTTP client.
 type Client struct {
-    hc      *http.Client
-    baseURL *url.URL
-    token   string
+    hc          *http.Client
+    baseURL     *url.URL
+    token       string
+    maxBodySize int64
 }
 
 // NewClient constructs a new client with the provided options (ex WithTimeout).
@@ -26,8 +32,9 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
         return nil, err
     }
     c := &Client{
-        hc:      &http.Client{},
-        baseURL: u,
+        hc:          &http.Client{},
+        baseURL:     u,
+        maxBodySize: MaxBodySize,
     }
     for _, o := range opts {
         o(c)
@@ -72,7 +79,7 @@ func (c *Client) NodeURL() string {
 // Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
 func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
     u := c.baseURL.ResolveReference(&url.URL{Path: path})
-    req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+    req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
     if err != nil {
         return nil, err
     }
@@ -89,7 +96,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
     if r.StatusCode != http.StatusOK {
         return nil, Non200Err(r)
     }
-    b, err := io.ReadAll(r.Body)
+    b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
     if err != nil {
         return nil, errors.Wrap(err, "error reading http response body")
     }
@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
 var ErrConnectionIssue = errors.New("could not connect")
 
 // Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
-func Non200Err(response *http.Response) error {
-    bodyBytes, err := io.ReadAll(response.Body)
+func Non200Err(r *http.Response) error {
+    b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
     var body string
     if err != nil {
         body = "(Unable to read response body.)"
     } else {
-        body = "response body:\n" + string(bodyBytes)
+        body = "response body:\n" + string(b)
     }
-    msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
-    switch response.StatusCode {
+    msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
+    switch r.StatusCode {
     case http.StatusNotFound:
         return errors.Wrap(ErrNotFound, msg)
     default:
```
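Bounding `io.ReadAll` with `io.LimitReader` is what enforces `MaxBodySize` and `MaxErrBodySize` above. A minimal, self-contained sketch of the behavior; the 8-byte cap here is illustrative, not a Prysm value:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("a very long response body")
	// The limited reader reports io.EOF after 8 bytes, so ReadAll
	// returns the truncated prefix with a nil error.
	b, err := io.ReadAll(io.LimitReader(body, 8))
	fmt.Printf("%q %v\n", string(b), err) // "a very l" <nil>
}
```

One consequence worth noting: the truncation is silent, so an oversized body parses as a short one rather than failing loudly.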
```diff
@@ -46,3 +46,10 @@ func WithAuthenticationToken(token string) ClientOpt {
         c.token = token
     }
 }
+
+// WithMaxBodySize overrides the default max body size of 8MB.
+func WithMaxBodySize(size int64) ClientOpt {
+    return func(c *Client) {
+        c.maxBodySize = size
+    }
+}
```
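A hypothetical caller raising the cap through the new option might look like the following; the host URL and the 32MB value are illustrative:

```go
package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
	// Override the 8MB default for endpoints known to return large payloads.
	c, err := client.NewClient("http://localhost:3500", client.WithMaxBodySize(1<<25))
	if err != nil {
		log.Fatal(err)
	}
	_ = c // use the client as usual; Get now reads up to 32MB per response
}
```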
```diff
@@ -386,8 +386,14 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
         return errors.Wrap(err, "batch signature verification failed")
     }
 
+    pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
+
     // Process each deposit individually
     for _, pendingDeposit := range pendingDeposits {
+        _, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
+        if !found {
+            pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
+        }
         validSignature := allSignaturesVerified
 
         // If batch verification failed, check the individual deposit signature
@@ -405,9 +411,16 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
 
         // Add validator to the registry if the signature is valid
         if validSignature {
-            err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
-            if err != nil {
-                return errors.Wrap(err, "failed to add validator to registry")
+            if found {
+                index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
+                if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
+                    return errors.Wrap(err, "could not increase balance")
+                }
+            } else {
+                err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
+                if err != nil {
+                    return errors.Wrap(err, "failed to add validator to registry")
+                }
             }
         }
     }
```
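The two hunks above fix duplicate handling: when several pending deposits in one batch share a public key, only the first may add a validator to the registry, and later ones must credit the existing validator's balance. A standalone sketch of the same fold-into-one pattern (the types and names here are illustrative, not Prysm's):

```go
package main

import "fmt"

type deposit struct {
	pubkey [48]byte
	amount uint64
}

// applyDeposits registers each pubkey once and folds repeat deposits
// into the existing entry's balance, mirroring the dedup above.
func applyDeposits(deps []deposit) map[[48]byte]uint64 {
	balances := make(map[[48]byte]uint64, len(deps))
	for _, d := range deps {
		balances[d.pubkey] += d.amount // first hit creates, later hits top up
	}
	return balances
}

func main() {
	var pk [48]byte
	pk[0] = 0xaa
	out := applyDeposits([]deposit{{pubkey: pk, amount: 32}, {pubkey: pk, amount: 32}})
	fmt.Println(len(out), out[pk]) // 1 64
}
```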
```diff
@@ -22,6 +22,40 @@ import (
     "github.com/prysmaticlabs/prysm/v5/testing/util"
 )
 
+func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
+    st := stateWithActiveBalanceETH(t, 1000)
+    deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
+    validators := st.Validators()
+    sk, err := bls.RandKey()
+    require.NoError(t, err)
+    for i := 0; i < len(deps); i += 1 {
+        wc := make([]byte, 32)
+        wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
+        wc[31] = byte(i)
+        validators[i].PublicKey = sk.PublicKey().Marshal()
+        validators[i].WithdrawalCredentials = wc
+        deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
+    }
+    require.NoError(t, st.SetPendingDeposits(deps))
+
+    err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
+    require.NoError(t, err)
+
+    val := st.Validators()
+    seenPubkeys := make(map[string]struct{})
+    for i := 0; i < len(val); i += 1 {
+        if len(val[i].PublicKey) == 0 {
+            continue
+        }
+        _, ok := seenPubkeys[string(val[i].PublicKey)]
+        if ok {
+            t.Fatalf("duplicated pubkeys")
+        } else {
+            seenPubkeys[string(val[i].PublicKey)] = struct{}{}
+        }
+    }
+}
+
 func TestProcessPendingDeposits(t *testing.T) {
     tests := []struct {
         name string
@@ -285,7 +319,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
     wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
     wc[31] = byte(0)
     validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
-    invalidDep := &eth.PendingDeposit{}
+    invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
     // have a combination of valid and invalid deposits
     deps := []*eth.PendingDeposit{validDep, invalidDep}
     require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))
```
```diff
@@ -623,13 +623,7 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
             continue
         }
 
-        // Verify the sidecar KZG proof
         v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
-        if err := v.SidecarKzgProofVerified(); err != nil {
-            log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
-            continue
-        }
-
         verifiedBlob, err := v.VerifiedROBlob()
         if err != nil {
             log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
```
```diff
@@ -7,6 +7,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock",
     visibility = ["//visibility:public"],
     deps = [
+        "//beacon-chain/operations/attestations:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
     ],
@@ -3,13 +3,17 @@ package mock
 import (
     "context"
 
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
     ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 )
 
+var _ attestations.Pool = &PoolMock{}
+
 // PoolMock --
 type PoolMock struct {
-    AggregatedAtts []*ethpb.Attestation
+    AggregatedAtts   []ethpb.Att
+    UnaggregatedAtts []ethpb.Att
 }
 
 // AggregateUnaggregatedAttestations --
```
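The added `var _ attestations.Pool = &PoolMock{}` line is a compile-time interface assertion: if the mock ever drifts from the `Pool` interface, the package stops compiling instead of failing at test runtime. A generic sketch of the idiom:

```go
package main

import "fmt"

type Greeter interface {
	Greet() string
}

type mockGreeter struct{}

func (mockGreeter) Greet() string { return "hi" }

// Compile-time proof that mockGreeter satisfies Greeter. The blank
// identifier discards the value, so this costs nothing at runtime.
var _ Greeter = mockGreeter{}

func main() {
	fmt.Println(mockGreeter{}.Greet())
}
```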
```diff
@@ -23,18 +27,18 @@ func (*PoolMock) AggregateUnaggregatedAttestationsBySlotIndex(_ context.Context,
 }
 
 // SaveAggregatedAttestation --
-func (*PoolMock) SaveAggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveAggregatedAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
 
 // SaveAggregatedAttestations --
-func (m *PoolMock) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
+func (m *PoolMock) SaveAggregatedAttestations(atts []ethpb.Att) error {
     m.AggregatedAtts = append(m.AggregatedAtts, atts...)
     return nil
 }
 
 // AggregatedAttestations --
-func (m *PoolMock) AggregatedAttestations() []*ethpb.Attestation {
+func (m *PoolMock) AggregatedAttestations() []ethpb.Att {
     return m.AggregatedAtts
 }
 
@@ -43,13 +47,18 @@ func (*PoolMock) AggregatedAttestationsBySlotIndex(_ context.Context, _ primitiv
     panic("implement me")
 }
 
+// AggregatedAttestationsBySlotIndexElectra --
+func (*PoolMock) AggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
+    panic("implement me")
+}
+
 // DeleteAggregatedAttestation --
-func (*PoolMock) DeleteAggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteAggregatedAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
 
 // HasAggregatedAttestation --
-func (*PoolMock) HasAggregatedAttestation(_ *ethpb.Attestation) (bool, error) {
+func (*PoolMock) HasAggregatedAttestation(_ ethpb.Att) (bool, error) {
     panic("implement me")
 }
@@ -59,18 +68,19 @@ func (*PoolMock) AggregatedAttestationCount() int {
 }
 
 // SaveUnaggregatedAttestation --
-func (*PoolMock) SaveUnaggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
 
 // SaveUnaggregatedAttestations --
-func (*PoolMock) SaveUnaggregatedAttestations(_ []*ethpb.Attestation) error {
-    panic("implement me")
+func (m *PoolMock) SaveUnaggregatedAttestations(atts []ethpb.Att) error {
+    m.UnaggregatedAtts = append(m.UnaggregatedAtts, atts...)
+    return nil
 }
 
 // UnaggregatedAttestations --
-func (*PoolMock) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
-    panic("implement me")
+func (m *PoolMock) UnaggregatedAttestations() ([]ethpb.Att, error) {
+    return m.UnaggregatedAtts, nil
 }
 
 // UnaggregatedAttestationsBySlotIndex --
@@ -78,8 +88,13 @@ func (*PoolMock) UnaggregatedAttestationsBySlotIndex(_ context.Context, _ primit
     panic("implement me")
 }
 
+// UnaggregatedAttestationsBySlotIndexElectra --
+func (*PoolMock) UnaggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
+    panic("implement me")
+}
+
 // DeleteUnaggregatedAttestation --
-func (*PoolMock) DeleteUnaggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteUnaggregatedAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
@@ -94,42 +109,42 @@ func (*PoolMock) UnaggregatedAttestationCount() int {
 }
 
 // SaveBlockAttestation --
-func (*PoolMock) SaveBlockAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveBlockAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
 
 // SaveBlockAttestations --
-func (*PoolMock) SaveBlockAttestations(_ []*ethpb.Attestation) error {
+func (*PoolMock) SaveBlockAttestations(_ []ethpb.Att) error {
     panic("implement me")
 }
 
 // BlockAttestations --
-func (*PoolMock) BlockAttestations() []*ethpb.Attestation {
+func (*PoolMock) BlockAttestations() []ethpb.Att {
     panic("implement me")
 }
 
 // DeleteBlockAttestation --
-func (*PoolMock) DeleteBlockAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteBlockAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
 
 // SaveForkchoiceAttestation --
-func (*PoolMock) SaveForkchoiceAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveForkchoiceAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
 
 // SaveForkchoiceAttestations --
-func (*PoolMock) SaveForkchoiceAttestations(_ []*ethpb.Attestation) error {
+func (*PoolMock) SaveForkchoiceAttestations(_ []ethpb.Att) error {
     panic("implement me")
 }
 
 // ForkchoiceAttestations --
-func (*PoolMock) ForkchoiceAttestations() []*ethpb.Attestation {
+func (*PoolMock) ForkchoiceAttestations() []ethpb.Att {
     panic("implement me")
 }
 
 // DeleteForkchoiceAttestation --
-func (*PoolMock) DeleteForkchoiceAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteForkchoiceAttestation(_ ethpb.Att) error {
     panic("implement me")
 }
```
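Because the save methods now append to exported slices, tests can round-trip attestations through the mock. A hypothetical test (import paths follow the ones used elsewhere in this diff):

```go
package mock_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestPoolMockRoundTrip(t *testing.T) {
	pool := &mock.PoolMock{}
	// Saves land in the exported AggregatedAtts/UnaggregatedAtts slices,
	// so assertions can read them back without touching a real pool.
	require.NoError(t, pool.SaveAggregatedAttestations([]ethpb.Att{}))
	atts, err := pool.UnaggregatedAttestations()
	require.NoError(t, err)
	require.Equal(t, 0, len(atts))
}
```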
```diff
@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
 // multiaddr for the given peer.
 func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
     // Disallow bad peers from dialing in.
-    if s.peers.IsBad(pid) {
+    if s.peers.IsBad(pid) != nil {
         return false
     }
     return filterConnections(s.addrFilter, m)
@@ -50,7 +50,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
     }()
 
     for i := 0; i < highWatermarkBuffer; i++ {
-        addPeer(t, s.peers, peers.PeerConnected, false)
+        addPeer(t, s.peers, peers.Connected, false)
     }
 
     // create alternate host
@@ -159,7 +159,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
     inboundLimit += 1
     // Add in up to inbound peer limit.
     for i := 0; i < int(inboundLimit); i++ {
-        addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+        addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
     }
     valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
     if valid {
@@ -189,7 +189,7 @@ func (s *Service) RefreshENR() {
         s.updateSubnetRecordWithMetadataV2(bitV, bitS)
     }
     // ping all peers to inform them of new metadata
-    s.pingPeers()
+    s.pingPeersAndLogEnr()
 }
 
 // listen for new nodes watches for new nodes in the network and adds them to the peerstore.
@@ -452,7 +452,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
     }
 
     // Ignore bad nodes.
-    if s.peers.IsBad(peerData.ID) {
+    if s.peers.IsBad(peerData.ID) != nil {
         return false
     }
 
@@ -378,14 +378,14 @@ func TestInboundPeerLimit(t *testing.T) {
     }
 
     for i := 0; i < 30; i++ {
-        _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+        _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
     }
 
     require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
     require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
 
     for i := 0; i < highWatermarkBuffer; i++ {
-        _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+        _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
     }
 
     require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
@@ -404,13 +404,13 @@ func TestOutboundPeerThreshold(t *testing.T) {
     }
 
     for i := 0; i < 2; i++ {
-        _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
+        _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
     }
 
     require.Equal(t, true, s.isBelowOutboundPeerThreshold(), "not at outbound peer threshold")
 
     for i := 0; i < 3; i++ {
-        _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
+        _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
     }
 
     require.Equal(t, false, s.isBelowOutboundPeerThreshold(), "still at outbound peer threshold")
@@ -477,7 +477,7 @@ func TestCorrectUDPVersion(t *testing.T) {
 }
 
 // addPeer is a helper to add a peer with a given connection state)
-func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState, outbound bool) peer.ID {
+func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outbound bool) peer.ID {
     // Set up some peers with different states
     mhBytes := []byte{0x11, 0x04}
     idBytes := make([]byte, 4)
```
```diff
@@ -2,7 +2,6 @@ package p2p
 
 import (
     "context"
-    "errors"
     "fmt"
     "io"
     "sync"
@@ -10,6 +9,7 @@ import (
 
     "github.com/libp2p/go-libp2p/core/network"
     "github.com/libp2p/go-libp2p/core/peer"
+    "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
     prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -25,6 +25,46 @@ func peerMultiaddrString(conn network.Conn) string {
     return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
 }
 
+func (s *Service) connectToPeer(conn network.Conn) {
+    s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
+    // Go through the handshake process.
+    log.WithFields(logrus.Fields{
+        "direction":   conn.Stat().Direction.String(),
+        "multiAddr":   peerMultiaddrString(conn),
+        "activePeers": len(s.peers.Active()),
+    }).Debug("Initiate peer connection")
+}
+
+func (s *Service) disconnectFromPeerOnError(
+    conn network.Conn,
+    goodByeFunc func(ctx context.Context, id peer.ID) error,
+    badPeerErr error,
+) {
+    // Get the remote peer ID.
+    remotePeerID := conn.RemotePeer()
+
+    // Set the peer to disconnecting state.
+    s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
+
+    // Only attempt a goodbye if we are still connected to the peer.
+    if s.host.Network().Connectedness(remotePeerID) == network.Connected {
+        if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
+            log.WithError(err).Error("Unable to disconnect from peer")
+        }
+    }
+
+    log.
+        WithError(badPeerErr).
+        WithFields(logrus.Fields{
+            "multiaddr":            peerMultiaddrString(conn),
+            "direction":            conn.Stat().Direction.String(),
+            "remainingActivePeers": len(s.peers.Active()),
+        }).
+        Debug("Initiate peer disconnection")
+
+    s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
+}
+
 // AddConnectionHandler adds a callback function which handles the connection with a
 // newly added peer. It performs a handshake with that peer by sending a hello request
 // and validating the response from the peer.
```
```diff
@@ -57,18 +97,9 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
     }
 
     s.host.Network().Notify(&network.NotifyBundle{
-        ConnectedF: func(net network.Network, conn network.Conn) {
+        ConnectedF: func(_ network.Network, conn network.Conn) {
             remotePeer := conn.RemotePeer()
-            disconnectFromPeer := func() {
-                s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
-                // Only attempt a goodbye if we are still connected to the peer.
-                if s.host.Network().Connectedness(remotePeer) == network.Connected {
-                    if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
-                        log.WithError(err).Error("Unable to disconnect from peer")
-                    }
-                }
-                s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
-            }
+
             // Connection handler must be non-blocking as part of libp2p design.
             go func() {
                 if peerHandshaking(remotePeer) {
@@ -77,28 +108,21 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
                     return
                 }
                 defer peerFinished(remotePeer)
 
                 // Handle the various pre-existing conditions that will result in us not handshaking.
                 peerConnectionState, err := s.peers.ConnectionState(remotePeer)
-                if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
+                if err == nil && (peerConnectionState == peers.Connected || peerConnectionState == peers.Connecting) {
                     log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
                     return
                 }
 
                 s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)
 
                 // Defensive check in the event we still get a bad peer.
-                if s.peers.IsBad(remotePeer) {
-                    log.WithField("reason", "bad peer").Trace("Ignoring connection request")
-                    disconnectFromPeer()
+                if err := s.peers.IsBad(remotePeer); err != nil {
+                    s.disconnectFromPeerOnError(conn, goodByeFunc, err)
                     return
                 }
-                validPeerConnection := func() {
-                    s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
-                    // Go through the handshake process.
-                    log.WithFields(logrus.Fields{
-                        "direction":   conn.Stat().Direction,
-                        "multiAddr":   peerMultiaddrString(conn),
-                        "activePeers": len(s.peers.Active()),
-                    }).Debug("Peer connected")
-                }
 
                 // Do not perform handshake on inbound dials.
                 if conn.Stat().Direction == network.DirInbound {
@@ -117,63 +141,80 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
                     // If peer hasn't sent a status request, we disconnect with them
                     if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
                         statusMessageMissing.Inc()
-                        disconnectFromPeer()
+                        s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
                         return
                     }
 
                     if peerExists {
                         updated, err := s.peers.ChainStateLastUpdated(remotePeer)
                         if err != nil {
-                            disconnectFromPeer()
+                            s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
                             return
                         }
-                        // exit if we don't receive any current status messages from
-                        // peer.
-                        if updated.IsZero() || !updated.After(currentTime) {
-                            disconnectFromPeer()
+
+                        // Exit if we don't receive any current status messages from peer.
+                        if updated.IsZero() {
+                            s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
+                            return
+                        }
+
+                        if updated.Before(currentTime) {
+                            s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
                             return
                         }
                     }
-                    validPeerConnection()
+
+                    s.connectToPeer(conn)
                     return
                 }
 
-                s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
+                s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
                 if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
                     log.WithError(err).Trace("Handshake failed")
-                    disconnectFromPeer()
+                    s.disconnectFromPeerOnError(conn, goodByeFunc, err)
                     return
                 }
-                validPeerConnection()
+
+                s.connectToPeer(conn)
             }()
         },
     })
 }
 
 // AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
 // This also calls the handler responsible for maintaining other parts of the sync or p2p system.
 func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
     s.host.Network().Notify(&network.NotifyBundle{
         DisconnectedF: func(net network.Network, conn network.Conn) {
-            log := log.WithField("multiAddr", peerMultiaddrString(conn))
+            peerID := conn.RemotePeer()
+
+            log.WithFields(logrus.Fields{
+                "multiAddr": peerMultiaddrString(conn),
+                "direction": conn.Stat().Direction.String(),
+            })
             // Must be handled in a goroutine as this callback cannot be blocking.
             go func() {
                 // Exit early if we are still connected to the peer.
-                if net.Connectedness(conn.RemotePeer()) == network.Connected {
+                if net.Connectedness(peerID) == network.Connected {
                     return
                 }
-                priorState, err := s.peers.ConnectionState(conn.RemotePeer())
+
+                priorState, err := s.peers.ConnectionState(peerID)
                 if err != nil {
                     // Can happen if the peer has already disconnected, so...
-                    priorState = peers.PeerDisconnected
+                    priorState = peers.Disconnected
                 }
-                s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
+
+                s.peers.SetConnectionState(peerID, peers.Disconnecting)
                 if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
                     log.WithError(err).Error("Disconnect handler failed")
                 }
-                s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
+
+                s.peers.SetConnectionState(peerID, peers.Disconnected)
+
                 // Only log disconnections if we were fully connected.
-                if priorState == peers.PeerConnected {
-                    log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
+                if priorState == peers.Connected {
+                    activePeersCount := len(s.peers.Active())
+                    log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
                 }
             }()
         },
```
```diff
@@ -23,8 +23,8 @@ var (
     ErrNoPeerStatus = errors.New("no chain status for peer")
 )
 
-// PeerConnectionState is the state of the connection.
-type PeerConnectionState ethpb.ConnectionState
+// ConnectionState is the state of the connection.
+type ConnectionState ethpb.ConnectionState
 
 // StoreConfig holds peer store parameters.
 type StoreConfig struct {
@@ -49,7 +49,7 @@ type PeerData struct {
     // Network related data.
     Address   ma.Multiaddr
     Direction network.Direction
-    ConnState PeerConnectionState
+    ConnState ConnectionState
     Enr       *enr.Record
     NextValidTime time.Time
     // Chain related data.
```
```diff
@@ -20,6 +20,7 @@ go_library(
         "//crypto/rand:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
     ],
 )
 
@@ -4,6 +4,7 @@ import (
     "time"
 
     "github.com/libp2p/go-libp2p/core/peer"
+    "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
 )
 
@@ -61,7 +62,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
 
 // scoreNoLock is a lock-free version of Score.
 func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
-    if s.isBadPeerNoLock(pid) {
+    if s.isBadPeerNoLock(pid) != nil {
         return BadPeerScore
     }
     score := float64(0)
```
```diff
@@ -116,18 +117,24 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
 
 // IsBadPeer states if the peer is to be considered bad.
 // If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
+func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
     s.store.RLock()
     defer s.store.RUnlock()
 
     return s.isBadPeerNoLock(pid)
 }
 
 // isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
     if peerData, ok := s.store.PeerData(pid); ok {
-        return peerData.BadResponses >= s.config.Threshold
+        if peerData.BadResponses >= s.config.Threshold {
+            return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
+        }
+
+        return nil
     }
-    return false
+
+    return nil
 }
 
 // BadPeers returns the peers that are considered bad.
```
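Switching `IsBadPeer` from `bool` to `error` lets the reason travel with the verdict, while `nil` still means the peer is fine. A self-contained sketch of the contract; the threshold logic mirrors the hunk above, and the names are illustrative:

```go
package main

import "fmt"

// isBadPeer mimics the new scorer contract: nil means good, and a
// non-nil error explains why the peer is considered bad.
func isBadPeer(badResponses, threshold int) error {
	if badResponses >= threshold {
		return fmt.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", badResponses, threshold)
	}
	return nil
}

func main() {
	fmt.Println(isBadPeer(1, 5)) // <nil>: peer is fine
	fmt.Println(isBadPeer(6, 5)) // the reason travels with the verdict
}
```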
```diff
@@ -137,7 +144,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {
 
     badPeers := make([]peer.ID, 0)
     for pid := range s.store.Peers() {
-        if s.isBadPeerNoLock(pid) {
+        if s.isBadPeerNoLock(pid) != nil {
             badPeers = append(badPeers, pid)
         }
     }
@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
     assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
 
     scorer.Increment(pid)
-    assert.Equal(t, false, scorer.IsBadPeer(pid))
+    assert.NoError(t, scorer.IsBadPeer(pid))
     assert.Equal(t, -2.5, scorer.Score(pid))
 
     scorer.Increment(pid)
-    assert.Equal(t, false, scorer.IsBadPeer(pid))
+    assert.NoError(t, scorer.IsBadPeer(pid))
     assert.Equal(t, float64(-5), scorer.Score(pid))
 
     scorer.Increment(pid)
-    assert.Equal(t, false, scorer.IsBadPeer(pid))
+    assert.NoError(t, scorer.IsBadPeer(pid))
     assert.Equal(t, float64(-7.5), scorer.Score(pid))
 
     scorer.Increment(pid)
-    assert.Equal(t, true, scorer.IsBadPeer(pid))
+    assert.NotNil(t, scorer.IsBadPeer(pid))
     assert.Equal(t, -100.0, scorer.Score(pid))
 }
@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
     })
     scorer := peerStatuses.Scorers().BadResponsesScorer()
     pid := peer.ID("peer1")
-    assert.Equal(t, false, scorer.IsBadPeer(pid))
+    assert.NoError(t, scorer.IsBadPeer(pid))
 
     peerStatuses.Add(nil, pid, nil, network.DirUnknown)
-    assert.Equal(t, false, scorer.IsBadPeer(pid))
+    assert.NoError(t, scorer.IsBadPeer(pid))
 
     for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
         scorer.Increment(pid)
         if i == scorers.DefaultBadResponsesThreshold-1 {
-            assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
+            assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
         } else {
-            assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
+            assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
         }
     }
 }
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
         scorer.Increment(pids[2])
         scorer.Increment(pids[4])
     }
-    assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
-    assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
-    assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
-    assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
-    assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
+    assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
+    assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
+    assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
+    assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
+    assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
     want := []peer.ID{pids[1], pids[2], pids[4]}
     badPeers := scorer.BadPeers()
     sort.Slice(badPeers, func(i, j int) bool {
@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
 // Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
 // Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
 // out low-scorers (see WeightSorted method).
-func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
-    return false
+func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
+    return nil
 }
 
 // BadPeers returns the peers that are considered bad.
@@ -119,7 +119,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
     }
 
     for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
+        t.Run(tt.name, func(*testing.T) {
             peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
                 PeerLimit: 30,
                 ScorerParams: &scorers.Config{
@@ -224,7 +224,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
     }{
         {
             name:   "no peers",
-            update: func(s *scorers.BlockProviderScorer) {},
+            update: func(*scorers.BlockProviderScorer) {},
             have:   []peer.ID{},
             want:   []peer.ID{},
         },
@@ -451,7 +451,7 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
         })
     }
     for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
+        t.Run(tt.name, func(*testing.T) {
             peerStatuses := peerStatusGen()
             scorer := peerStatuses.Scorers().BlockProviderScorer()
             if tt.update != nil {
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
     })
     scorer := peerStatuses.Scorers().BlockProviderScorer()
 
-    assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
+    assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
     scorer.IncrementProcessedBlocks("peer1", 64)
-    assert.Equal(t, false, scorer.IsBadPeer("peer1"))
+    assert.NoError(t, scorer.IsBadPeer("peer1"))
     assert.Equal(t, 0, len(scorer.BadPeers()))
 }
```
```diff
@@ -2,6 +2,7 @@ package scorers
 
 import (
     "github.com/libp2p/go-libp2p/core/peer"
+    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 )
@@ -51,19 +52,24 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
 }
 
 // IsBadPeer states if the peer is to be considered bad.
-func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
+func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
     s.store.RLock()
     defer s.store.RUnlock()
     return s.isBadPeerNoLock(pid)
 }
 
 // isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
     peerData, ok := s.store.PeerData(pid)
     if !ok {
-        return false
+        return nil
     }
-    return peerData.GossipScore < gossipThreshold
+
+    if peerData.GossipScore < gossipThreshold {
+        return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
+    }
+
+    return nil
 }
 
 // BadPeers returns the peers that are considered bad.
@@ -73,7 +79,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {
 
     badPeers := make([]peer.ID, 0)
     for pid := range s.store.Peers() {
-        if s.isBadPeerNoLock(pid) {
+        if s.isBadPeerNoLock(pid) != nil {
             badPeers = append(badPeers, pid)
         }
     }
@@ -21,7 +21,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
     }{
         {
             name: "nonexistent peer",
-            update: func(scorer *scorers.GossipScorer) {
+            update: func(*scorers.GossipScorer) {
             },
             check: func(scorer *scorers.GossipScorer) {
                 assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
             },
             check: func(scorer *scorers.GossipScorer) {
                 assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
-                assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
+                assert.NotNil(t, scorer.IsBadPeer("peer1"), "Unexpected good peer")
             },
         },
         {
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
             },
             check: func(scorer *scorers.GossipScorer) {
                 assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
-                assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
+                assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
                 _, _, topicMap, err := scorer.GossipData("peer1")
                 assert.NoError(t, err)
                 assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -53,7 +53,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
     }
 
     for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
+        t.Run(tt.name, func(*testing.T) {
             peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
                 ScorerParams: &scorers.Config{},
             })
```
```diff
@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
 
 // scoreNoLock is a lock-free version of Score.
 func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
-    if s.isBadPeerNoLock(pid) {
+    if s.isBadPeerNoLock(pid) != nil {
         return BadPeerScore
     }
     score := float64(0)
@@ -67,30 +67,34 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
 }
 
 // IsBadPeer states if the peer is to be considered bad.
-func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
+func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) error {
     s.store.RLock()
     defer s.store.RUnlock()
 
     return s.isBadPeerNoLock(pid)
 }
 
 // isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) error {
     peerData, ok := s.store.PeerData(pid)
     if !ok {
-        return false
+        return nil
     }
 
     // Mark peer as bad, if the latest error is one of the terminal ones.
     terminalErrs := []error{
         p2ptypes.ErrWrongForkDigestVersion,
         p2ptypes.ErrInvalidFinalizedRoot,
         p2ptypes.ErrInvalidRequest,
     }
 
     for _, err := range terminalErrs {
         if errors.Is(peerData.ChainStateValidationError, err) {
-            return true
+            return err
         }
     }
-    return false
+
+    return nil
 }
 
 // BadPeers returns the peers that are considered bad.
@@ -100,7 +104,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
 
     badPeers := make([]peer.ID, 0)
     for pid := range s.store.Peers() {
-        if s.isBadPeerNoLock(pid) {
+        if s.isBadPeerNoLock(pid) != nil {
             badPeers = append(badPeers, pid)
         }
     }
@@ -122,7 +122,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
     }
 
     for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
+        t.Run(tt.name, func(*testing.T) {
             peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
                 ScorerParams: &scorers.Config{},
             })
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
         ScorerParams: &scorers.Config{},
     })
     pid := peer.ID("peer1")
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
-    assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
+    assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
 
     peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-    assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
-    assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+    assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
+    assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
 }
 
 func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
     pid1 := peer.ID("peer1")
     pid2 := peer.ID("peer2")
     pid3 := peer.ID("peer3")
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
-    assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
-    assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
-    assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid1))
+    assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
+    assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
+    assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
 
     peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
     peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
     peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-    assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
-    assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
-    assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-    assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
-    assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+    assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
+    assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
+    assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
+    assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid3))
+    assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
     assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
     assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
 }
```
```diff
@@ -6,6 +6,7 @@ import (
     "time"
 
     "github.com/libp2p/go-libp2p/core/peer"
+    "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
     "github.com/prysmaticlabs/prysm/v5/config/features"
 )
@@ -24,7 +25,7 @@ const BadPeerScore = gossipThreshold
 // Scorer defines minimum set of methods every peer scorer must expose.
 type Scorer interface {
     Score(pid peer.ID) float64
-    IsBadPeer(pid peer.ID) bool
+    IsBadPeer(pid peer.ID) error
     BadPeers() []peer.ID
 }
@@ -124,26 +125,29 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
 }
 
 // IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
-func (s *Service) IsBadPeer(pid peer.ID) bool {
+func (s *Service) IsBadPeer(pid peer.ID) error {
     s.store.RLock()
     defer s.store.RUnlock()
     return s.IsBadPeerNoLock(pid)
 }
 
 // IsBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
-    if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
-        return true
+func (s *Service) IsBadPeerNoLock(pid peer.ID) error {
+    if err := s.scorers.badResponsesScorer.isBadPeerNoLock(pid); err != nil {
+        return errors.Wrap(err, "bad responses scorer")
     }
-    if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
-        return true
+
+    if err := s.scorers.peerStatusScorer.isBadPeerNoLock(pid); err != nil {
+        return errors.Wrap(err, "peer status scorer")
     }
+
     if features.Get().EnablePeerScorer {
-        if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
-            return true
+        if err := s.scorers.gossipScorer.isBadPeerNoLock(pid); err != nil {
+            return errors.Wrap(err, "gossip scorer")
         }
    }
-    return false
+
+    return nil
 }
 
 // BadPeers returns the peers that are considered bad by any of registered scorers.
```
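Each `errors.Wrap` prefixes the scorer's name, so a single log line identifies both which scorer flagged the peer and why. An illustration of the resulting message shape (the numbers are made up):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// The inner reason, as a scorer would produce it...
	inner := errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", 6, 5)
	// ...wrapped by the aggregating service with the scorer's name.
	fmt.Println(errors.Wrap(inner, "bad responses scorer"))
	// bad responses scorer: peer exceeded bad responses threshold: got 6, threshold 5
}
```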
```diff
@@ -153,7 +157,7 @@ func (s *Service) BadPeers() []peer.ID {
 
     badPeers := make([]peer.ID, 0)
     for pid := range s.store.Peers() {
-        if s.IsBadPeerNoLock(pid) {
+        if s.IsBadPeerNoLock(pid) != nil {
             badPeers = append(badPeers, pid)
         }
     }
@@ -100,7 +100,7 @@ func TestScorers_Service_Score(t *testing.T) {
         return scores
     }
 
-    pack := func(scorer *scorers.Service, s1, s2, s3 float64) map[string]float64 {
+    pack := func(_ *scorers.Service, s1, s2, s3 float64) map[string]float64 {
         return map[string]float64{
             "peer1": roundScore(s1),
             "peer2": roundScore(s2),
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
     for i := 0; i < s1.Params().Threshold+5; i++ {
         s1.Increment(pid1)
     }
-    assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")
+    assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
 
     s2.IncrementProcessedBlocks("peer1", 221)
     assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
         for {
             select {
             case <-ticker.C:
-                if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
+                if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
                     return
                 }
             case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
    }()
 
     <-done
-    assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
+    assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
     assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
 }
@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
         },
     })
 
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
     peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
     peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
-    assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
+    assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
 }
 
 func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
         },
     })
 
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
     assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
     for _, pid := range []peer.ID{"peer1", "peer3"} {
         peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
         peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
     }
-    assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
-    assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
-    assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
+    assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
+    assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
+    assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
     assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
 }
```
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
@@ -49,14 +50,14 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// PeerDisconnected means there is no connection to the peer.
|
||||
PeerDisconnected peerdata.PeerConnectionState = iota
|
||||
// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
|
||||
PeerDisconnecting
|
||||
// PeerConnected means the peer has an active connection.
|
||||
PeerConnected
|
||||
// PeerConnecting means there is an on-going attempt to connect to the peer.
|
||||
PeerConnecting
|
||||
// Disconnected means there is no connection to the peer.
|
||||
Disconnected peerdata.ConnectionState = iota
|
||||
// Disconnecting means there is an on-going attempt to disconnect from the peer.
|
||||
Disconnecting
|
||||
// Connected means the peer has an active connection.
|
||||
Connected
|
||||
// Connecting means there is an on-going attempt to connect to the peer.
|
||||
Connecting
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -150,7 +151,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
		Address:   address,
		Direction: direction,
		// Peers start disconnected; state will be updated when the handshake process begins.
-		ConnState: PeerDisconnected,
+		ConnState: Disconnected,
	}
	if record != nil {
		peerData.Enr = record
@@ -212,7 +213,7 @@ func (p *Status) IsActive(pid peer.ID) bool {
	defer p.store.RUnlock()

	peerData, ok := p.store.PeerData(pid)
-	return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
+	return ok && (peerData.ConnState == Connected || peerData.ConnState == Connecting)
}

// IsAboveInboundLimit checks if we are above our current inbound
@@ -222,7 +223,7 @@ func (p *Status) IsAboveInboundLimit() bool {
	defer p.store.RUnlock()
	totalInbound := 0
	for _, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected &&
+		if peerData.ConnState == Connected &&
			peerData.Direction == network.DirInbound {
			totalInbound += 1
		}
@@ -286,7 +287,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
		// look at active peers
-		connectedStatus := peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected
+		connectedStatus := peerData.ConnState == Connecting || peerData.ConnState == Connected
		if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
			indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
			for _, idx := range indices {
@@ -301,7 +302,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
}

// SetConnectionState sets the connection state of the given remote peer.
-func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
+func (p *Status) SetConnectionState(pid peer.ID, state peerdata.ConnectionState) {
	p.store.Lock()
	defer p.store.Unlock()

@@ -311,14 +312,14 @@ func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionSt

// ConnectionState gets the connection state of the given remote peer.
// This will error if the peer does not exist.
-func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
+func (p *Status) ConnectionState(pid peer.ID) (peerdata.ConnectionState, error) {
	p.store.RLock()
	defer p.store.RUnlock()

	if peerData, ok := p.store.PeerData(pid); ok {
		return peerData.ConnState, nil
	}
-	return PeerDisconnected, peerdata.ErrPeerUnknown
+	return Disconnected, peerdata.ErrPeerUnknown
}

// ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
@@ -335,19 +336,29 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {

// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (p *Status) IsBad(pid peer.ID) bool {
+func (p *Status) IsBad(pid peer.ID) error {
	p.store.RLock()
	defer p.store.RUnlock()

	return p.isBad(pid)
}

// isBad is the lock-free version of IsBad.
-func (p *Status) isBad(pid peer.ID) bool {
+func (p *Status) isBad(pid peer.ID) error {
	// Do not disconnect from trusted peers.
	if p.store.IsTrustedPeer(pid) {
-		return false
+		return nil
	}
-	return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
+
+	if err := p.isfromBadIP(pid); err != nil {
+		return errors.Wrap(err, "peer is from a bad IP")
+	}
+
+	if err := p.scorers.IsBadPeerNoLock(pid); err != nil {
+		return errors.Wrap(err, "is bad peer no lock")
+	}
+
+	return nil
}

// NextValidTime gets the earliest possible time it is to contact/dial
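With IsBad returning an error instead of a bool, callers can now surface why a peer was rejected, which is what feeds the disconnection-reason logging mentioned in the changelog. A hedged sketch of the caller-side migration; badPeerChecker, fakeStatus, and maybeDisconnect are invented for illustration and are not identifiers from the diff:

package main

import (
	"errors"
	"fmt"
)

// badPeerChecker stands in for the *Status type: IsBad now returns an
// error whose message explains why the peer is bad, or nil for a good peer.
type badPeerChecker interface {
	IsBad(pid string) error
}

// maybeDisconnect shows the migration: instead of `if p.IsBad(pid) { ... }`,
// the error is checked and its reason is available for the log line.
func maybeDisconnect(p badPeerChecker, pid string) bool {
	if err := p.IsBad(pid); err != nil {
		fmt.Printf("disconnecting from %s: %v\n", pid, err)
		return true
	}
	return false
}

type fakeStatus struct{}

func (fakeStatus) IsBad(pid string) error {
	if pid == "peer1" {
		return errors.New("peer is from a bad IP: collocation limit exceeded")
	}
	return nil
}

func main() {
	maybeDisconnect(fakeStatus{}, "peer1") // prints the reason
	maybeDisconnect(fakeStatus{}, "peer2") // no-op
}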
@@ -411,7 +422,7 @@ func (p *Status) Connecting() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnecting {
+		if peerData.ConnState == Connecting {
			peers = append(peers, pid)
		}
	}
@@ -424,7 +435,7 @@ func (p *Status) Connected() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected {
+		if peerData.ConnState == Connected {
			peers = append(peers, pid)
		}
	}
@@ -450,7 +461,7 @@ func (p *Status) InboundConnected() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
+		if peerData.ConnState == Connected && peerData.Direction == network.DirInbound {
			peers = append(peers, pid)
		}
	}
@@ -463,7 +474,7 @@ func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
+		if peerData.ConnState == Connected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
			peers = append(peers, pid)
		}
	}
@@ -489,7 +500,7 @@ func (p *Status) OutboundConnected() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
+		if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound {
			peers = append(peers, pid)
		}
	}
@@ -502,7 +513,7 @@ func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
+		if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
			peers = append(peers, pid)
		}
	}
@@ -515,7 +526,7 @@ func (p *Status) Active() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
+		if peerData.ConnState == Connecting || peerData.ConnState == Connected {
			peers = append(peers, pid)
		}
	}
@@ -528,7 +539,7 @@ func (p *Status) Disconnecting() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerDisconnecting {
+		if peerData.ConnState == Disconnecting {
			peers = append(peers, pid)
		}
	}
@@ -541,7 +552,7 @@ func (p *Status) Disconnected() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerDisconnected {
+		if peerData.ConnState == Disconnected {
			peers = append(peers, pid)
		}
	}
@@ -554,7 +565,7 @@ func (p *Status) Inactive() []peer.ID {
	defer p.store.RUnlock()
	peers := make([]peer.ID, 0)
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
+		if peerData.ConnState == Disconnecting || peerData.ConnState == Disconnected {
			peers = append(peers, pid)
		}
	}
@@ -592,7 +603,7 @@ func (p *Status) Prune() {
		return
	}
	notBadPeer := func(pid peer.ID) bool {
-		return !p.isBad(pid)
+		return p.isBad(pid) == nil
	}
	notTrustedPeer := func(pid peer.ID) bool {
		return !p.isTrustedPeers(pid)
@@ -605,7 +616,7 @@ func (p *Status) Prune() {
	// Select disconnected peers with a smaller bad response count.
	for pid, peerData := range p.store.Peers() {
		// Should not prune trusted peer or prune the peer dara and unset trusted peer.
-		if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
+		if peerData.ConnState == Disconnected && notBadPeer(pid) && notTrustedPeer(pid) {
			peersToPrune = append(peersToPrune, &peerResp{
				pid:   pid,
				score: p.Scorers().ScoreNoLock(pid),
@@ -657,7 +668,7 @@ func (p *Status) deprecatedPrune() {
	// Select disconnected peers with a smaller bad response count.
	for pid, peerData := range p.store.Peers() {
		// Should not prune trusted peer or prune the peer dara and unset trusted peer.
-		if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
+		if peerData.ConnState == Disconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
			peersToPrune = append(peersToPrune, &peerResp{
				pid:     pid,
				badResp: peerData.BadResponses,
@@ -814,7 +825,7 @@ func (p *Status) PeersToPrune() []peer.ID {
	peersToPrune := make([]*peerResp, 0)
	// Select connected and inbound peers to prune.
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected &&
+		if peerData.ConnState == Connected &&
			peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
			peersToPrune = append(peersToPrune, &peerResp{
				pid: pid,
@@ -880,7 +891,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
	peersToPrune := make([]*peerResp, 0)
	// Select connected and inbound peers to prune.
	for pid, peerData := range p.store.Peers() {
-		if peerData.ConnState == PeerConnected &&
+		if peerData.ConnState == Connected &&
			peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
			peersToPrune = append(peersToPrune, &peerResp{
				pid: pid,
@@ -982,24 +993,28 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {

// this method assumes the store lock is acquired before
// executing the method.
-func (p *Status) isfromBadIP(pid peer.ID) bool {
+func (p *Status) isfromBadIP(pid peer.ID) error {
	peerData, ok := p.store.PeerData(pid)
	if !ok {
-		return false
+		return nil
	}

	if peerData.Address == nil {
-		return false
+		return nil
	}

	ip, err := manet.ToIP(peerData.Address)
	if err != nil {
-		return true
+		return errors.Wrap(err, "to ip")
	}

	if val, ok := p.ipTracker[ip.String()]; ok {
		if val > CollocationLimit {
-			return true
+			return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
		}
	}
-	return false
+
+	return nil
}

func (p *Status) addIpToTracker(pid peer.ID) {
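The same bool-to-error conversion applies to the IP check, and because each layer wraps with errors.Wrap, the final log line carries the whole cause chain. A small sketch under that assumption; checkIP and collocationLimit are stand-ins for the real isfromBadIP and CollocationLimit:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// collocationLimit mirrors the CollocationLimit constant for illustration.
const collocationLimit = 5

// checkIP is a stand-in for isfromBadIP: it returns a descriptive error
// rather than a bare bool when too many peers share one IP.
func checkIP(peersOnIP int) error {
	if peersOnIP > collocationLimit {
		return errors.Errorf("collocation limit exceeded: got %d - limit %d", peersOnIP, collocationLimit)
	}
	return nil
}

func main() {
	// The wrapping mirrors isBad: each layer adds context, so the printed
	// error reads
	// "peer is from a bad IP: collocation limit exceeded: got 9 - limit 5".
	if err := checkIP(9); err != nil {
		err = errors.Wrap(err, "peer is from a bad IP")
		fmt.Println(err)
	}
}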
@@ -215,7 +215,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
	// Add some peers with different states
	numPeers := 2
	for i := 0; i < numPeers; i++ {
-		addPeer(t, p, peers.PeerConnected)
+		addPeer(t, p, peers.Connected)
	}
	expectedPeer := p.All()[1]
	bitV := bitfield.NewBitvector64()
@@ -230,7 +230,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
	}))
	numPeers = 3
	for i := 0; i < numPeers; i++ {
-		addPeer(t, p, peers.PeerDisconnected)
+		addPeer(t, p, peers.Disconnected)
	}
	ps := p.SubscribedToSubnet(2)
	assert.Equal(t, 1, len(ps), "Unexpected num of peers")
@@ -259,7 +259,7 @@ func TestPeerImplicitAdd(t *testing.T) {
	id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	require.NoError(t, err)

-	connectionState := peers.PeerConnecting
+	connectionState := peers.Connecting
	p.SetConnectionState(id, connectionState)

	resConnectionState, err := p.ConnectionState(id)
@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
		require.NoError(t, err)
	}

-	assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
+	assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

	address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
	require.NoError(t, err, "Failed to create address")
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
	resBadResponses, err := scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
-	assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
+	assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

	scorer.Increment(id)
	resBadResponses, err = scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
-	assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
+	assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

	scorer.Increment(id)
	resBadResponses, err = scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
-	assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
+	assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")

	scorer.Increment(id)
	resBadResponses, err = scorer.Count(id)
	require.NoError(t, err)
	assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
-	assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
+	assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}

func TestAddMetaData(t *testing.T) {
@@ -393,7 +393,7 @@ func TestAddMetaData(t *testing.T) {
	// Add some peers with different states
	numPeers := 5
	for i := 0; i < numPeers; i++ {
-		addPeer(t, p, peers.PeerConnected)
+		addPeer(t, p, peers.Connected)
	}
	newPeer := p.All()[2]

@@ -422,19 +422,19 @@ func TestPeerConnectionStatuses(t *testing.T) {
	// Add some peers with different states
	numPeersDisconnected := 11
	for i := 0; i < numPeersDisconnected; i++ {
-		addPeer(t, p, peers.PeerDisconnected)
+		addPeer(t, p, peers.Disconnected)
	}
	numPeersConnecting := 7
	for i := 0; i < numPeersConnecting; i++ {
-		addPeer(t, p, peers.PeerConnecting)
+		addPeer(t, p, peers.Connecting)
	}
	numPeersConnected := 43
	for i := 0; i < numPeersConnected; i++ {
-		addPeer(t, p, peers.PeerConnected)
+		addPeer(t, p, peers.Connected)
	}
	numPeersDisconnecting := 4
	for i := 0; i < numPeersDisconnecting; i++ {
-		addPeer(t, p, peers.PeerDisconnecting)
+		addPeer(t, p, peers.Disconnecting)
	}

	// Now confirm the states
@@ -463,7 +463,7 @@ func TestPeerValidTime(t *testing.T) {

	numPeersConnected := 6
	for i := 0; i < numPeersConnected; i++ {
-		addPeer(t, p, peers.PeerConnected)
+		addPeer(t, p, peers.Connected)
	}

	allPeers := p.All()
@@ -510,10 +510,10 @@ func TestPrune(t *testing.T) {
	for i := 0; i < p.MaxPeerLimit()+100; i++ {
		if i%7 == 0 {
			// Peer added as disconnected.
-			_ = addPeer(t, p, peers.PeerDisconnected)
+			_ = addPeer(t, p, peers.Disconnected)
		}
		// Peer added to peer handler.
-		_ = addPeer(t, p, peers.PeerConnected)
+		_ = addPeer(t, p, peers.Connected)
	}

	disPeers := p.Disconnected()
@@ -571,23 +571,23 @@ func TestPeerIPTracker(t *testing.T) {
		if err != nil {
			t.Fatal(err)
		}
-		badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
+		badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
	}
	for _, pr := range badPeers {
-		assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
+		assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
	}

	// Add in bad peers, so that our records are trimmed out
	// from the peer store.
	for i := 0; i < p.MaxPeerLimit()+100; i++ {
		// Peer added to peer handler.
-		pid := addPeer(t, p, peers.PeerDisconnected)
+		pid := addPeer(t, p, peers.Disconnected)
		p.Scorers().BadResponsesScorer().Increment(pid)
	}
	p.Prune()

	for _, pr := range badPeers {
-		assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
+		assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
	}
}

@@ -601,8 +601,11 @@ func TestTrimmedOrderedPeers(t *testing.T) {
		},
	})

-	expectedTarget := primitives.Epoch(2)
-	maxPeers := 3
+	const (
+		expectedTarget = primitives.Epoch(2)
+		maxPeers       = 3
+	)
+
	var mockroot2 [32]byte
	var mockroot3 [32]byte
	var mockroot4 [32]byte
@@ -611,36 +614,41 @@ func TestTrimmedOrderedPeers(t *testing.T) {
	copy(mockroot3[:], "three")
	copy(mockroot4[:], "four")
	copy(mockroot5[:], "five")
+
	// Peer 1
-	pid1 := addPeer(t, p, peers.PeerConnected)
+	pid1 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid1, &pb.Status{
		HeadSlot:       3 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 3,
		FinalizedRoot:  mockroot3[:],
	})
+
	// Peer 2
-	pid2 := addPeer(t, p, peers.PeerConnected)
+	pid2 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid2, &pb.Status{
		HeadSlot:       4 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 4,
		FinalizedRoot:  mockroot4[:],
	})
+
	// Peer 3
-	pid3 := addPeer(t, p, peers.PeerConnected)
+	pid3 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid3, &pb.Status{
		HeadSlot:       5 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 5,
		FinalizedRoot:  mockroot5[:],
	})
+
	// Peer 4
-	pid4 := addPeer(t, p, peers.PeerConnected)
+	pid4 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid4, &pb.Status{
		HeadSlot:       2 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 2,
		FinalizedRoot:  mockroot2[:],
	})
+
	// Peer 5
-	pid5 := addPeer(t, p, peers.PeerConnected)
+	pid5 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid5, &pb.Status{
		HeadSlot:       2 * params.BeaconConfig().SlotsPerEpoch,
		FinalizedEpoch: 2,
@@ -680,12 +688,12 @@ func TestAtInboundPeerLimit(t *testing.T) {
	})
	for i := 0; i < 15; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	assert.Equal(t, false, p.IsAboveInboundLimit(), "Inbound limit exceeded")
	for i := 0; i < 31; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	assert.Equal(t, true, p.IsAboveInboundLimit(), "Inbound limit not exceeded")
}
@@ -705,7 +713,7 @@ func TestPrunePeers(t *testing.T) {
	})
	for i := 0; i < 15; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	// Assert there are no prunable peers.
	peersToPrune := p.PeersToPrune()
@@ -713,7 +721,7 @@ func TestPrunePeers(t *testing.T) {

	for i := 0; i < 18; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	// Assert there are the correct prunable peers.
@@ -723,7 +731,7 @@ func TestPrunePeers(t *testing.T) {
	// Add in more peers.
	for i := 0; i < 13; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	// Set up bad scores for inbound peers.
@@ -767,7 +775,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {

	for i := 0; i < 15; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}
	// Assert there are no prunable peers.
	peersToPrune := p.PeersToPrune()
@@ -775,7 +783,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {

	for i := 0; i < 18; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	// Assert there are the correct prunable peers.
@@ -785,7 +793,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
	// Add in more peers.
	for i := 0; i < 13; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	var trustedPeers []peer.ID
@@ -821,7 +829,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
	// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
	for i := 0; i < 9; i++ {
		// Peer added to peer handler.
-		createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
+		createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
	}

	// Delete trusted peers.
@@ -865,14 +873,14 @@ func TestStatus_BestPeer(t *testing.T) {
		headSlot       primitives.Slot
		finalizedEpoch primitives.Epoch
	}

	tests := []struct {
-		name              string
-		peers             []*peerConfig
-		limitPeers        int
-		ourFinalizedEpoch primitives.Epoch
-		targetEpoch       primitives.Epoch
-		// targetEpochSupport denotes how many peers support returned epoch.
-		targetEpochSupport int
+		name               string
+		peers              []*peerConfig
+		limitPeers         int
+		ourFinalizedEpoch  primitives.Epoch
+		targetEpoch        primitives.Epoch
+		targetEpochSupport int // Denotes how many peers support returned epoch.
	}{
		{
			name: "head slot matches finalized epoch",
@@ -885,6 +893,7 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
			},
			limitPeers:         15,
+			ourFinalizedEpoch:  0,
			targetEpoch:        4,
			targetEpochSupport: 4,
		},
@@ -902,6 +911,7 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
			},
			limitPeers:         15,
+			ourFinalizedEpoch:  0,
			targetEpoch:        4,
			targetEpochSupport: 4,
		},
@@ -916,6 +926,7 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
			},
			limitPeers:         15,
+			ourFinalizedEpoch:  0,
			targetEpoch:        4,
			targetEpochSupport: 4,
		},
@@ -930,8 +941,8 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
			},
-			ourFinalizedEpoch:  5,
			limitPeers:         15,
+			ourFinalizedEpoch:  5,
			targetEpoch:        6,
			targetEpochSupport: 1,
		},
@@ -950,8 +961,8 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
			},
-			ourFinalizedEpoch:  5,
			limitPeers:         15,
+			ourFinalizedEpoch:  5,
			targetEpoch:        6,
			targetEpochSupport: 5,
		},
@@ -970,8 +981,8 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
			},
-			ourFinalizedEpoch:  5,
			limitPeers:         4,
+			ourFinalizedEpoch:  5,
			targetEpoch:        6,
			targetEpochSupport: 4,
		},
@@ -986,8 +997,8 @@ func TestStatus_BestPeer(t *testing.T) {
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
				{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
			},
-			ourFinalizedEpoch:  5,
			limitPeers:         15,
+			ourFinalizedEpoch:  5,
			targetEpoch:        8,
			targetEpochSupport: 3,
		},
@@ -1002,7 +1013,7 @@ func TestStatus_BestPeer(t *testing.T) {
			},
		})
		for _, peerConfig := range tt.peers {
-			p.SetChainState(addPeer(t, p, peers.PeerConnected), &pb.Status{
+			p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
				FinalizedEpoch: peerConfig.finalizedEpoch,
				HeadSlot:       peerConfig.headSlot,
			})
@@ -1028,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {

	for i := 0; i <= maxPeers+100; i++ {
		p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
-		p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
+		p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
		p.SetChainState(peer.ID(rune(i)), &pb.Status{
			FinalizedEpoch: 10,
		})
@@ -1051,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
	peerSlots := []primitives.Slot{32, 32, 32, 32, 235, 233, 258, 268, 270}
	for i, headSlot := range peerSlots {
		p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
-		p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
+		p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
		p.SetChainState(peer.ID(rune(i)), &pb.Status{
			HeadSlot: headSlot,
		})
@@ -1074,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
		},
	})
	// Peer 1
-	pid1 := addPeer(t, p, peers.PeerConnected)
+	pid1 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid1, &pb.Status{
		HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
	})
	// Peer 2
-	pid2 := addPeer(t, p, peers.PeerConnected)
+	pid2 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid2, &pb.Status{
		HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
	})
	// Peer 3
-	pid3 := addPeer(t, p, peers.PeerConnected)
+	pid3 := addPeer(t, p, peers.Connected)
	p.SetChainState(pid3, &pb.Status{
		HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
	})
@@ -1103,8 +1114,8 @@ func TestInbound(t *testing.T) {
	})
	addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	require.NoError(t, err)
-	inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
-	createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
+	inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
+	createPeer(t, p, addr, network.DirOutbound, peers.Connected)

	result := p.Inbound()
	require.Equal(t, 1, len(result))
@@ -1123,8 +1134,8 @@ func TestInboundConnected(t *testing.T) {

	addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	require.NoError(t, err)
-	inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
-	createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
+	inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
+	createPeer(t, p, addr, network.DirInbound, peers.Connecting)

	result := p.InboundConnected()
	require.Equal(t, 1, len(result))
@@ -1157,7 +1168,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
		multiaddr, err := ma.NewMultiaddr(addr)
		require.NoError(t, err)

-		peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
+		peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
		expectedTCP[peer.String()] = true
	}

@@ -1166,7 +1177,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
		multiaddr, err := ma.NewMultiaddr(addr)
		require.NoError(t, err)

-		peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
+		peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
		expectedQUIC[peer.String()] = true
	}

@@ -1203,8 +1214,8 @@ func TestOutbound(t *testing.T) {
	})
	addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	require.NoError(t, err)
-	createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
-	outbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
+	createPeer(t, p, addr, network.DirInbound, peers.Connected)
+	outbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)

	result := p.Outbound()
	require.Equal(t, 1, len(result))
@@ -1223,8 +1234,8 @@ func TestOutboundConnected(t *testing.T) {

	addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
	require.NoError(t, err)
-	inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
-	createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
+	inbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
+	createPeer(t, p, addr, network.DirOutbound, peers.Connecting)

	result := p.OutboundConnected()
	require.Equal(t, 1, len(result))
@@ -1257,7 +1268,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
		multiaddr, err := ma.NewMultiaddr(addr)
		require.NoError(t, err)

-		peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
+		peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
		expectedTCP[peer.String()] = true
	}

@@ -1266,7 +1277,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
		multiaddr, err := ma.NewMultiaddr(addr)
		require.NoError(t, err)

-		peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
+		peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
		expectedQUIC[peer.String()] = true
	}

@@ -1293,7 +1304,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
}

// addPeer is a helper to add a peer with a given connection state)
-func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
+func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState) peer.ID {
	// Set up some peers with different states
	mhBytes := []byte{0x11, 0x04}
	idBytes := make([]byte, 4)
@@ -1312,7 +1323,7 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
}

func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
-	dir network.Direction, state peerdata.PeerConnectionState) peer.ID {
+	dir network.Direction, state peerdata.ConnectionState) peer.ID {
	mhBytes := []byte{0x11, 0x04}
	idBytes := make([]byte, 4)
	_, err := rand.Read(idBytes)
@@ -202,12 +202,13 @@ func (s *Service) Start() {
		s.startupErr = err
		return
	}
-	err = s.connectToBootnodes()
-	if err != nil {
-		log.WithError(err).Error("Could not add bootnode to the exclusion list")
+
+	if err := s.connectToBootnodes(); err != nil {
+		log.WithError(err).Error("Could not connect to boot nodes")
		s.startupErr = err
		return
	}
+
	s.dv5Listener = listener
	go s.listenForNewNodes()
}
@@ -384,12 +385,17 @@ func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) er
	s.pingMethodLock.Unlock()
}

-func (s *Service) pingPeers() {
+func (s *Service) pingPeersAndLogEnr() {
	s.pingMethodLock.RLock()
	defer s.pingMethodLock.RUnlock()

+	localENR := s.dv5Listener.Self()
+	log.WithField("ENR", localENR).Info("New node record")
+
	if s.pingMethod == nil {
		return
	}
+
	for _, pid := range s.peers.Connected() {
		go func(id peer.ID) {
			if err := s.pingMethod(s.ctx, id); err != nil {
@@ -462,8 +468,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
	if info.ID == s.host.ID() {
		return nil
	}
-	if s.Peers().IsBad(info.ID) {
-		return errors.New("refused to connect to bad peer")
+	if err := s.Peers().IsBad(info.ID); err != nil {
+		return errors.Wrap(err, "refused to connect to bad peer")
	}
	ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
	defer cancel()
@@ -20,7 +20,7 @@ type MockPeerManager struct {
}

// Disconnect .
-func (_ *MockPeerManager) Disconnect(peer.ID) error {
+func (*MockPeerManager) Disconnect(peer.ID) error {
	return nil
}

@@ -35,12 +35,12 @@ func (m *MockPeerManager) Host() host.Host {
}

// ENR .
-func (m MockPeerManager) ENR() *enr.Record {
+func (m *MockPeerManager) ENR() *enr.Record {
	return m.Enr
}

// DiscoveryAddresses .
-func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
+func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
	if m.FailDiscoveryAddr {
		return nil, errors.New("fail")
	}
@@ -48,12 +48,12 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}

// RefreshENR .
-func (_ MockPeerManager) RefreshENR() {}
+func (*MockPeerManager) RefreshENR() {}

// FindPeersWithSubnet .
-func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
+func (*MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	return true, nil
}

// AddPingMethod .
-func (_ MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
+func (*MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}

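These mock changes are a receiver cleanup: blank `_` receivers are dropped, and the remaining value receivers become pointer receivers so the type's method set is consistent. A tiny illustrative sketch; the mock type below is invented, not taken from the diff:

package main

import "fmt"

// mock illustrates the receiver cleanup: with a consistent pointer method
// set, a *mock value satisfies any interface listing all the methods, and
// an unused receiver is simply omitted rather than named "_".
type mock struct{ enr string }

// Before: func (m mock) ENR() string — a value receiver mixed in.
// After: a pointer receiver, matching the rest of the type's methods.
func (m *mock) ENR() string { return m.enr }

// Before: func (_ *mock) RefreshENR() {} — a blank receiver name.
// After: the receiver is dropped entirely when unused.
func (*mock) RefreshENR() {}

func main() {
	m := &mock{enr: "enr:-example"}
	m.RefreshENR()
	fmt.Println(m.ENR())
}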
@@ -64,7 +64,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
			log.WithError(err).Debug("Cannot decode")
		}
		m.peers.Add(createENR(), id0, ma0, network.DirInbound)
-		m.peers.SetConnectionState(id0, peers.PeerConnected)
+		m.peers.SetConnectionState(id0, peers.Connected)
		m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
		id1, err := peer.Decode(MockRawPeerId1)
		if err != nil {
@@ -75,7 +75,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
			log.WithError(err).Debug("Cannot decode")
		}
		m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
-		m.peers.SetConnectionState(id1, peers.PeerConnected)
+		m.peers.SetConnectionState(id1, peers.Connected)
		m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
	}
	return m.peers

@@ -239,7 +239,7 @@ func (p *TestP2P) LeaveTopic(topic string) error {
}

// Encoding returns ssz encoding.
-func (_ *TestP2P) Encoding() encoder.NetworkEncoding {
+func (*TestP2P) Encoding() encoder.NetworkEncoding {
	return &encoder.SszNetworkEncoder{}
}

@@ -266,12 +266,12 @@ func (p *TestP2P) Host() host.Host {
}

// ENR returns the enr of the local peer.
-func (_ *TestP2P) ENR() *enr.Record {
+func (*TestP2P) ENR() *enr.Record {
	return new(enr.Record)
}

// DiscoveryAddresses --
-func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
+func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
	return nil, nil
}

@@ -284,16 +284,16 @@ func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID
				p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction)
				ctx := context.Background()

-				p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
+				p.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
				if err := f(ctx, conn.RemotePeer()); err != nil {
					logrus.WithError(err).Error("Could not send successful hello rpc request")
					if err := p.Disconnect(conn.RemotePeer()); err != nil {
						logrus.WithError(err).Errorf("Unable to close peer %s", conn.RemotePeer())
					}
-					p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
+					p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnected)
					return
				}
-				p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
+				p.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
			}()
		},
	})
@@ -305,11 +305,11 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID
		DisconnectedF: func(net network.Network, conn network.Conn) {
			// Must be handled in a goroutine as this callback cannot be blocking.
			go func() {
-				p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
+				p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnecting)
				if err := f(context.Background(), conn.RemotePeer()); err != nil {
					logrus.WithError(err).Debug("Unable to invoke callback")
				}
-				p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
+				p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnected)
			}()
		},
	})
@@ -353,7 +353,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
}

// Started always returns true.
-func (_ *TestP2P) Started() bool {
+func (*TestP2P) Started() bool {
	return true
}

@@ -363,12 +363,12 @@ func (p *TestP2P) Peers() *peers.Status {
}

// FindPeersWithSubnet mocks the p2p func.
-func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
+func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	return false, nil
}

// RefreshENR mocks the p2p func.
-func (_ *TestP2P) RefreshENR() {}
+func (*TestP2P) RefreshENR() {}

// ForkDigest mocks the p2p func.
func (p *TestP2P) ForkDigest() ([4]byte, error) {
@@ -386,31 +386,31 @@ func (p *TestP2P) MetadataSeq() uint64 {
}

// AddPingMethod mocks the p2p func.
-func (_ *TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
+func (*TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
	// no-op
}

// InterceptPeerDial .
-func (_ *TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
+func (*TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
	return true
}

// InterceptAddrDial .
-func (_ *TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
+func (*TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
	return true
}

// InterceptAccept .
-func (_ *TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
+func (*TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
	return true
}

// InterceptSecured .
-func (_ *TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
+func (*TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
	return true
}

// InterceptUpgraded .
-func (_ *TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
+func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
	return true, 0
}

@@ -5,6 +5,7 @@ go_library(
	srcs = [
		"endpoints.go",
		"log.go",
+		"metrics.go",
		"service.go",
	],
	importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc",
@@ -34,18 +34,41 @@ type endpoint struct {
	methods []string
}

+// responseWriter is the wrapper to http Response writer.
+type responseWriter struct {
+	http.ResponseWriter
+	statusCode int
+}
+
+// WriteHeader wraps the WriteHeader method of the underlying http.ResponseWriter to capture the status code.
+// Refer for WriteHeader doc: https://pkg.go.dev/net/http@go1.23.3#ResponseWriter.
+func (w *responseWriter) WriteHeader(statusCode int) {
+	w.statusCode = statusCode
+	w.ResponseWriter.WriteHeader(statusCode)
+}
+
func (e *endpoint) handlerWithMiddleware() http.HandlerFunc {
	handler := http.Handler(e.handler)
	for _, m := range e.middleware {
		handler = m(handler)
	}
-	return promhttp.InstrumentHandlerDuration(
+
+	handler = promhttp.InstrumentHandlerDuration(
		httpRequestLatency.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
		promhttp.InstrumentHandlerCounter(
			httpRequestCount.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
			handler,
		),
	)
+
+	return func(w http.ResponseWriter, r *http.Request) {
+		rw := &responseWriter{ResponseWriter: w, statusCode: http.StatusOK}
+		handler.ServeHTTP(rw, r)
+
+		if rw.statusCode >= 400 {
+			httpErrorCount.WithLabelValues(r.URL.Path, http.StatusText(rw.statusCode), r.Method).Inc()
+		}
+	}
}

func (s *Service) endpoints(
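The wrapper exists because http.ResponseWriter offers no way to read back the status code a handler wrote; capturing it in WriteHeader is what lets the middleware feed the new Prometheus error counter from the changelog. A self-contained sketch of the same pattern; the metric name, statusRecorder, and withErrorMetric below are illustrative, not the diff's identifiers:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// errorCount is an illustrative stand-in for httpErrorCount; the label
// names mirror the ones used in the diff (path, status text, method).
var errorCount = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "http_error_count_example",
	Help: "Total HTTP error responses, labeled by endpoint, error, and method.",
}, []string{"endpoint", "error", "method"})

// statusRecorder captures the code passed to WriteHeader; handlers that
// never call WriteHeader implicitly return 200, hence the default.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (w *statusRecorder) WriteHeader(code int) {
	w.status = code
	w.ResponseWriter.WriteHeader(code)
}

// withErrorMetric is a minimal sketch of the middleware: run the handler,
// then bump the counter for any 4xx/5xx response.
func withErrorMetric(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		next.ServeHTTP(rec, r)
		if rec.status >= 400 {
			errorCount.WithLabelValues(r.URL.Path, http.StatusText(rec.status), r.Method).Inc()
		}
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/missing", func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "not found", http.StatusNotFound)
	})
	_ = http.ListenAndServe(":8080", withErrorMetric(mux))
}

One known trade-off of this pattern: the embedded ResponseWriter hides optional interfaces such as http.Flusher or http.Hijacker, so streaming endpoints may need extra care.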
@@ -6,6 +6,7 @@ go_library(
		"handlers.go",
		"handlers_pool.go",
		"handlers_state.go",
+		"handlers_validation.go",
		"handlers_validator.go",
		"log.go",
		"server.go",
@@ -58,6 +59,7 @@ go_library(
		"//runtime/version:go_default_library",
		"//time/slots:go_default_library",
		"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
+		"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
		"@com_github_pkg_errors//:go_default_library",
		"@com_github_prysmaticlabs_fastssz//:go_default_library",
		"@com_github_sirupsen_logrus//:go_default_library",

@@ -17,7 +17,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
	corehelpers "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
@@ -1029,63 +1028,6 @@ func unmarshalStrict(data []byte, v interface{}) error {
	return dec.Decode(v)
}

-func (s *Server) validateBroadcast(ctx context.Context, r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
-	switch r.URL.Query().Get(broadcastValidationQueryParam) {
-	case broadcastValidationConsensus:
-		b, err := blocks.NewSignedBeaconBlock(blk.Block)
-		if err != nil {
-			return errors.Wrapf(err, "could not create signed beacon block")
-		}
-		if err = s.validateConsensus(ctx, b); err != nil {
-			return errors.Wrap(err, "consensus validation failed")
-		}
-	case broadcastValidationConsensusAndEquivocation:
-		b, err := blocks.NewSignedBeaconBlock(blk.Block)
-		if err != nil {
-			return errors.Wrapf(err, "could not create signed beacon block")
-		}
-		if err = s.validateConsensus(r.Context(), b); err != nil {
-			return errors.Wrap(err, "consensus validation failed")
-		}
-		if err = s.validateEquivocation(b.Block()); err != nil {
-			return errors.Wrap(err, "equivocation validation failed")
-		}
-	default:
-		return nil
-	}
-	return nil
-}
-
-func (s *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
-	parentBlockRoot := blk.Block().ParentRoot()
-	parentBlock, err := s.Blocker.Block(ctx, parentBlockRoot[:])
-	if err != nil {
-		return errors.Wrap(err, "could not get parent block")
-	}
-
-	if err := blocks.BeaconBlockIsNil(blk); err != nil {
-		return errors.Wrap(err, "could not validate block")
-	}
-
-	parentStateRoot := parentBlock.Block().StateRoot()
-	parentState, err := s.Stater.State(ctx, parentStateRoot[:])
-	if err != nil {
-		return errors.Wrap(err, "could not get parent state")
-	}
-	_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
-	if err != nil {
-		return errors.Wrap(err, "could not execute state transition")
-	}
-	return nil
-}
-
-func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
-	if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
-		return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
-	}
-	return nil
-}
-
// GetBlockRoot retrieves the root of a block.
func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
	ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
@@ -87,7 +87,7 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
// ListAttestationsV2 retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
-	ctx, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
+	_, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
	defer span.End()

	rawSlot, slot, ok := shared.UintFromQuery(w, r, "slot", false)
@@ -98,13 +98,10 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
	if !ok {
		return
	}

-	headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
-	if err != nil {
-		httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
-		return
+	v := slots.ToForkVersion(primitives.Slot(slot))
+	if rawSlot == "" {
+		v = slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
	}

	attestations := s.AttestationsPool.AggregatedAttestations()
	unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
	if err != nil {
@@ -116,7 +113,7 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
	filteredAtts := make([]interface{}, 0, len(attestations))
	for _, att := range attestations {
		var includeAttestation bool
-		if headState.Version() >= version.Electra {
+		if v >= version.Electra && att.Version() >= version.Electra {
			attElectra, ok := att.(*eth.AttestationElectra)
			if !ok {
				httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
@@ -128,7 +125,7 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
				attStruct := structs.AttElectraFromConsensus(attElectra)
				filteredAtts = append(filteredAtts, attStruct)
			}
-		} else {
+		} else if v < version.Electra && att.Version() < version.Electra {
			attOld, ok := att.(*eth.Attestation)
			if !ok {
				httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
@@ -149,9 +146,9 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
		return
	}

-	w.Header().Set(api.VersionHeader, version.String(headState.Version()))
+	w.Header().Set(api.VersionHeader, version.String(v))
	httputil.WriteJson(w, &structs.ListAttestationsResponse{
-		Version: version.String(headState.Version()),
+		Version: version.String(v),
		Data:    attsData,
	})
}
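The endpoint now derives the fork version from the requested slot, falling back to the node's current slot when the slot query parameter is absent, instead of consulting the head state. A toy sketch of that selection logic; toForkVersion and the electraStartSlot boundary are invented for illustration and do not match any real fork schedule:

package main

import "fmt"

const (
	phase0  = 0
	electra = 6
	// electraStartSlot is an illustrative boundary, not a real config value.
	electraStartSlot = 1000
)

// toForkVersion is a fictional stand-in for slots.ToForkVersion.
func toForkVersion(slot uint64) int {
	if slot >= electraStartSlot {
		return electra
	}
	return phase0
}

// pickVersion mirrors the handler: use the slot query param if present,
// otherwise fall back to the node's current slot.
func pickVersion(rawSlot string, slot, currentSlot uint64) int {
	v := toForkVersion(slot)
	if rawSlot == "" {
		v = toForkVersion(currentSlot)
	}
	return v
}

func main() {
	fmt.Println(pickVersion("1500", 1500, 2000)) // 6: the explicit slot wins
	fmt.Println(pickVersion("", 0, 500))         // 0: falls back to the current slot
}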
@@ -726,31 +723,33 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
	ctx, span := trace.StartSpan(r.Context(), "beacon.GetAttesterSlashingsV2")
	defer span.End()

+	v := slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
	headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
	if err != nil {
		httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
		return
	}

	var attStructs []interface{}
	sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)

	for _, slashing := range sourceSlashings {
		var attStruct interface{}
-		if headState.Version() >= version.Electra {
+		if v >= version.Electra && slashing.Version() >= version.Electra {
			a, ok := slashing.(*eth.AttesterSlashingElectra)
			if !ok {
				httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to an Electra slashing", slashing), http.StatusInternalServerError)
				return
			}
			attStruct = structs.AttesterSlashingElectraFromConsensus(a)
-		} else {
+		} else if v < version.Electra && slashing.Version() < version.Electra {
			a, ok := slashing.(*eth.AttesterSlashing)
			if !ok {
				httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to a Phase0 slashing", slashing), http.StatusInternalServerError)
				return
			}
			attStruct = structs.AttesterSlashingFromConsensus(a)
+		} else {
+			continue
		}
		attStructs = append(attStructs, attStruct)
	}
@@ -762,10 +761,10 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
	}

	resp := &structs.GetAttesterSlashingsResponse{
-		Version: version.String(headState.Version()),
+		Version: version.String(v),
		Data:    attBytes,
	}
-	w.Header().Set(api.VersionHeader, version.String(headState.Version()))
+	w.Header().Set(api.VersionHeader, version.String(v))
	httputil.WriteJson(w, resp)
}

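Note the new `else { continue }` branch: a slashing whose fork generation does not match the response version is now skipped instead of being force-cast and failing the request. A hedged sketch of the same version-matched filtering; the types below are invented, and the real code switches on slashing.Version() and the eth.* concrete types instead:

package main

import "fmt"

// Illustrative stand-ins for the two slashing generations.
type phase0Slashing struct{ id int }
type electraSlashing struct{ id int }

// filterForVersion keeps only items that match the response fork version,
// mirroring the handler's skip of mismatched pool entries.
func filterForVersion(items []interface{}, electra bool) []interface{} {
	out := make([]interface{}, 0, len(items))
	for _, it := range items {
		switch it.(type) {
		case electraSlashing:
			if electra {
				out = append(out, it)
			}
		case phase0Slashing:
			if !electra {
				out = append(out, it)
			}
		}
	}
	return out
}

func main() {
	pool := []interface{}{electraSlashing{1}, phase0Slashing{2}, electraSlashing{3}}
	// Post-Electra response: the phase0 slashing is silently skipped.
	fmt.Println(len(filterForVersion(pool, true))) // 2
}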
@@ -115,9 +115,16 @@ func TestListAttestations(t *testing.T) {
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
}
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
}
|
||||
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
|
||||
|
||||
@@ -204,10 +211,19 @@ func TestListAttestations(t *testing.T) {
|
||||
t.Run("Pre-Electra", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
}
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.DenebForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
|
||||
t.Run("empty request", func(t *testing.T) {
|
||||
@@ -226,7 +242,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 4, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
})
|
||||
t.Run("slot request", func(t *testing.T) {
|
||||
url := "http://example.com?slot=2"
|
||||
@@ -244,7 +260,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "2", a.Data.Slot)
|
||||
}
|
||||
@@ -265,7 +281,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "4", a.Data.CommitteeIndex)
|
||||
}
|
||||
@@ -286,7 +302,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 1, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "2", a.Data.Slot)
|
||||
assert.Equal(t, "4", a.Data.CommitteeIndex)
|
||||
@@ -370,12 +386,21 @@ func TestListAttestations(t *testing.T) {
|
||||
}
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
}
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4}))
|
||||
// Added one pre electra attestation to ensure it is ignored.
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2, att1}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4, att3}))
|
||||
|
||||
t.Run("empty request", func(t *testing.T) {
|
||||
url := "http://example.com"
|
||||
@@ -1658,12 +1683,59 @@ func TestGetAttesterSlashings(t *testing.T) {
})
})
t.Run("V2", func(t *testing.T) {
t.Run("post-electra-ok-1-pre-slashing", func(t *testing.T) {
bs, err := util.NewBeaconStateElectra()
require.NoError(t, err)

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 100
params.OverrideBeaconConfig(config)

chainService := &blockchainmock.ChainService{State: bs}

s := &Server{
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra, slashing1PreElectra}},
}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.GetAttesterSlashingsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetAttesterSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, "electra", resp.Version)

// Unmarshal resp.Data into a slice of slashings
var slashings []*structs.AttesterSlashingElectra
require.NoError(t, json.Unmarshal(resp.Data, &slashings))

ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
require.NoError(t, err)

require.DeepEqual(t, slashing1PostElectra, ss[0])
require.DeepEqual(t, slashing2PostElectra, ss[1])
})
t.Run("post-electra-ok", func(t *testing.T) {
bs, err := util.NewBeaconStateElectra()
require.NoError(t, err)

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 100
params.OverrideBeaconConfig(config)

chainService := &blockchainmock.ChainService{State: bs}

s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra}},
}

@@ -1692,9 +1764,11 @@ func TestGetAttesterSlashings(t *testing.T) {
t.Run("pre-electra-ok", func(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &blockchainmock.ChainService{State: bs}

s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
}

@@ -1722,8 +1796,15 @@ func TestGetAttesterSlashings(t *testing.T) {
bs, err := util.NewBeaconStateElectra()
require.NoError(t, err)

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.ElectraForkEpoch = 100
params.OverrideBeaconConfig(config)

chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{}},
}

@@ -19,10 +19,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
mockp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
rpctesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
@@ -2910,69 +2908,6 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
})
}

func TestValidateConsensus(t *testing.T) {
ctx := context.Background()

parentState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot())
require.NoError(t, err)
parentSbb, err := blocks.NewSignedBeaconBlock(parentBlock)
require.NoError(t, err)
st, err := transition.ExecuteStateTransition(ctx, parentState, parentSbb)
require.NoError(t, err)
block, err := util.GenerateFullBlock(st, privs, util.DefaultBlockGenConfig(), st.Slot())
require.NoError(t, err)
sbb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
parentRoot, err := parentSbb.Block().HashTreeRoot()
require.NoError(t, err)
server := &Server{
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
}

require.NoError(t, server.validateConsensus(ctx, sbb))
}

func TestValidateEquivocation(t *testing.T) {
t.Run("ok", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
require.NoError(t, err)
fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
server := &Server{
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
}
blk.SetSlot(st.Slot() + 1)

require.NoError(t, server.validateEquivocation(blk.Block()))
})
t.Run("block already exists", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
blk.SetSlot(st.Slot())
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
require.NoError(t, err)

fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
server := &Server{
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
}
err = server.validateEquivocation(blk.Block())
assert.ErrorContains(t, "already exists", err)
require.ErrorIs(t, err, errEquivocatedBlock)
})
}

func TestServer_GetBlockRoot(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

109 beacon-chain/rpc/eth/beacon/handlers_validation.go Normal file
@@ -0,0 +1,109 @@
package beacon

import (
"context"
"net/http"

"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

func (s *Server) validateBroadcast(ctx context.Context, r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
switch r.URL.Query().Get(broadcastValidationQueryParam) {
case broadcastValidationConsensus:
if err := s.validateConsensus(ctx, blk); err != nil {
return errors.Wrap(err, "consensus validation failed")
}
case broadcastValidationConsensusAndEquivocation:
b, err := blocks.NewSignedBeaconBlock(blk.Block)
if err != nil {
return errors.Wrapf(err, "could not create signed beacon block")
}
if err = s.validateConsensus(r.Context(), blk); err != nil {
return errors.Wrap(err, "consensus validation failed")
}
if err = s.validateEquivocation(b.Block()); err != nil {
return errors.Wrap(err, "equivocation validation failed")
}
default:
return nil
}
return nil
}

func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
if blk.Version() < version.Deneb {
return nil
}
kzgs, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get blob kzg commitments")
}
if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
return errors.New("number of blobs, proofs, and commitments do not match")
}
for i, blob := range blobs {
if err := kzg4844.VerifyBlobProof(kzg4844.Blob(blob), kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
return errors.Wrap(err, "could not verify blob proof")
}
}
return nil
}

func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeaconBlock) error {
blk, err := blocks.NewSignedBeaconBlock(b.Block)
if err != nil {
return errors.Wrapf(err, "could not create signed beacon block")
}

parentBlockRoot := blk.Block().ParentRoot()
parentBlock, err := s.Blocker.Block(ctx, parentBlockRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent block")
}

if err := blocks.BeaconBlockIsNil(blk); err != nil {
return errors.Wrap(err, "could not validate block")
}

parentStateRoot := parentBlock.Block().StateRoot()
parentState, err := s.Stater.State(ctx, parentStateRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}

var blobs [][]byte
var proofs [][]byte
switch {
case blk.Version() == version.Electra:
blobs = b.GetElectra().Blobs
proofs = b.GetElectra().KzgProofs
case blk.Version() == version.Deneb:
blobs = b.GetDeneb().Blobs
proofs = b.GetDeneb().KzgProofs
default:
return nil
}

if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
return err
}

return nil
}

func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
}
return nil
}
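The switch in `validateBroadcast` above dispatches on the `broadcast_validation` query parameter that the beacon API defines for the block publishing endpoints, with stricter levels running strictly more checks. A minimal, self-contained sketch of the same dispatch pattern; the level strings follow the beacon API spec, while the wiring here is illustrative and not Prysm's actual code:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Validation levels from the beacon API block publishing spec. The default
// (gossip-only) level performs no extra server-side checks in this sketch.
const (
	levelGossip                   = "gossip"
	levelConsensus                = "consensus"
	levelConsensusAndEquivocation = "consensus_and_equivocation"
)

// validate mirrors the handler's dispatch: the equivocation level implies the
// consensus checks as well. Toy sketch; not Prysm's internals.
func validate(r *http.Request, runConsensus, runEquivocation func() error) error {
	switch r.URL.Query().Get("broadcast_validation") {
	case levelConsensus:
		return runConsensus()
	case levelConsensusAndEquivocation:
		if err := runConsensus(); err != nil {
			return err
		}
		return runEquivocation()
	default: // levelGossip or unset
		return nil
	}
}

func main() {
	r := httptest.NewRequest(http.MethodPost,
		"http://node.example/eth/v1/beacon/blocks?broadcast_validation=consensus_and_equivocation", nil)
	err := validate(r,
		func() error { return nil },                        // consensus checks pass
		func() error { return errors.New("equivocation") }, // equivocation detected
	)
	fmt.Println(err) // equivocation
}
```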
85 beacon-chain/rpc/eth/beacon/handlers_validation_test.go Normal file
@@ -0,0 +1,85 @@
package beacon

import (
"context"
"testing"

chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestValidateConsensus(t *testing.T) {
ctx := context.Background()

parentState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot())
require.NoError(t, err)
parentSbb, err := blocks.NewSignedBeaconBlock(parentBlock)
require.NoError(t, err)
st, err := transition.ExecuteStateTransition(ctx, parentState, parentSbb)
require.NoError(t, err)
block, err := util.GenerateFullBlock(st, privs, util.DefaultBlockGenConfig(), st.Slot())
require.NoError(t, err)
parentRoot, err := parentSbb.Block().HashTreeRoot()
require.NoError(t, err)
server := &Server{
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
}

require.NoError(t, server.validateConsensus(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Phase0{
Phase0: block,
},
}))
}

func TestValidateEquivocation(t *testing.T) {
t.Run("ok", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
require.NoError(t, err)
fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
server := &Server{
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
}
blk.SetSlot(st.Slot() + 1)

require.NoError(t, server.validateEquivocation(blk.Block()))
})
t.Run("block already exists", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
blk.SetSlot(st.Slot())
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
require.NoError(t, err)

fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
server := &Server{
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
}
err = server.validateEquivocation(blk.Block())
assert.ErrorContains(t, "already exists", err)
require.ErrorIs(t, err, errEquivocatedBlock)
})
}
@@ -117,13 +117,13 @@ func TestGetPeers(t *testing.T) {

switch i {
case 0, 1:
peerStatus.SetConnectionState(id, peers.PeerConnecting)
peerStatus.SetConnectionState(id, peers.Connecting)
case 2, 3:
peerStatus.SetConnectionState(id, peers.PeerConnected)
peerStatus.SetConnectionState(id, peers.Connected)
case 4, 5:
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
peerStatus.SetConnectionState(id, peers.Disconnecting)
case 6, 7:
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
peerStatus.SetConnectionState(id, peers.Disconnected)
default:
t.Fatalf("Failed to set connection state for peer")
}
@@ -289,13 +289,13 @@ func TestGetPeerCount(t *testing.T) {

switch i {
case 0:
peerStatus.SetConnectionState(id, peers.PeerConnecting)
peerStatus.SetConnectionState(id, peers.Connecting)
case 1, 2:
peerStatus.SetConnectionState(id, peers.PeerConnected)
peerStatus.SetConnectionState(id, peers.Connected)
case 3, 4, 5:
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
peerStatus.SetConnectionState(id, peers.Disconnecting)
case 6, 7, 8, 9:
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
peerStatus.SetConnectionState(id, peers.Disconnected)
default:
t.Fatalf("Failed to set connection state for peer")
}

@@ -75,7 +75,7 @@ func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request)

// GetAggregateAttestationV2 aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestationV2")
_, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestationV2")
defer span.End()

_, attDataRoot, ok := shared.HexFromQuery(w, r, "attestation_data_root", fieldparams.RootLength, true)
@@ -91,14 +91,15 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
return
}

v := slots.ToForkVersion(primitives.Slot(slot))
agg := s.aggregatedAttestation(w, primitives.Slot(slot), attDataRoot, primitives.CommitteeIndex(index))
if agg == nil {
return
}
resp := &structs.AggregateAttestationResponse{
Version: version.String(agg.Version()),
Version: version.String(v),
}
if agg.Version() >= version.Electra {
if v >= version.Electra {
typedAgg, ok := agg.(*ethpbalpha.AttestationElectra)
if !ok {
httputil.HandleError(w, fmt.Sprintf("Attestation is not of type %T", &ethpbalpha.AttestationElectra{}), http.StatusInternalServerError)
@@ -123,12 +124,7 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
}
resp.Data = data
}
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
if err != nil {
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
w.Header().Set(api.VersionHeader, version.String(v))
httputil.WriteJson(w, resp)
}
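The handler now derives the response version (and the `Eth-Consensus-Version` header) from the requested slot rather than from the head state or the returned attestation. A self-contained sketch of that slot-to-fork mapping; the slots-per-epoch value and fork epochs below are mainnet figures used purely for illustration, not a substitute for `slots.ToForkVersion`:

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed here for illustration

// Assumed fork schedule: epoch at which each fork activates (mainnet-style).
var forkEpochs = []struct {
	name  string
	epoch uint64
}{
	{"phase0", 0},
	{"altair", 74240},
	{"bellatrix", 144896},
	{"capella", 194048},
	{"deneb", 269568},
	{"electra", 364032},
}

// versionForSlot mirrors the idea behind slots.ToForkVersion: map a slot to
// the latest fork whose activation epoch is not after the slot's epoch.
func versionForSlot(slot uint64) string {
	epoch := slot / slotsPerEpoch
	v := forkEpochs[0].name
	for _, f := range forkEpochs {
		if epoch >= f.epoch {
			v = f.name
		}
	}
	return v
}

func main() {
	fmt.Println(versionForSlot(0))           // phase0
	fmt.Println(versionForSlot(269568 * 32)) // deneb
}
```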
@@ -179,27 +175,37 @@ func matchingAtts(atts []ethpbalpha.Att, slot primitives.Slot, attDataRoot []byt
return []ethpbalpha.Att{}, nil
}

postElectra := atts[0].Version() >= version.Electra

postElectra := slots.ToForkVersion(slot) >= version.Electra
result := make([]ethpbalpha.Att, 0)
for _, att := range atts {
if att.GetData().Slot != slot {
continue
}

// We ignore the committee index from the request before Electra.
// This is because before Electra the committee index is part of the attestation data,
// meaning that comparing the data root is sufficient.
// Post-Electra the committee index in the data root is always 0, so we need to
// compare the committee index separately.
if postElectra {
ci, err := att.GetCommitteeIndex()
if err != nil {
return nil, err
if att.Version() >= version.Electra {
ci, err := att.GetCommitteeIndex()
if err != nil {
return nil, err
}
if ci != index {
continue
}
} else {
continue
}
if ci != index {
} else {
// If postElectra is false and att.Version >= version.Electra, ignore the attestation.
if att.Version() >= version.Electra {
continue
}
}

root, err := att.GetData().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get attestation data root")
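The net effect of the rewritten loop: the requested committee index only takes part in matching once the slot is post-Electra, and attestations whose own version disagrees with the slot's fork are dropped. A self-contained toy of just that filtering decision (the struct and version numbering are simplified assumptions, not the real attestation types):

```go
package main

import "fmt"

// att is a simplified stand-in for the real attestation types.
type att struct {
	slot      uint64
	version   int // 0 = pre-Electra, 1 = Electra
	committee uint64
}

// match reflects the handler's rules: the slot must agree; an attestation from
// the "wrong" fork for the requested slot is skipped; the committee index is
// only compared for post-Electra requests, where it is no longer part of the
// attestation data root.
func match(a att, slot, index uint64, postElectra bool) bool {
	if a.slot != slot {
		return false
	}
	if postElectra {
		if a.version < 1 {
			return false // pre-Electra attestation at a post-Electra slot
		}
		return a.committee == index
	}
	return a.version < 1 // post-Electra attestation at a pre-Electra slot is ignored
}

func main() {
	atts := []att{
		{slot: 2, version: 1, committee: 0}, // matches
		{slot: 2, version: 0, committee: 0}, // wrong fork for a post-Electra request
		{slot: 2, version: 1, committee: 3}, // wrong committee
	}
	for _, a := range atts {
		fmt.Println(match(a, 2, 0, true)) // true, false, false
	}
}
```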
@@ -55,15 +55,33 @@ func TestGetAggregateAttestation(t *testing.T) {
require.NoError(t, err)
sig := key.Sign([]byte("sig"))

t.Run("V1", func(t *testing.T) {
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.Attestation {
return &ethpbalpha.Attestation{
AggregationBits: aggregationBits,
Data: createAttestationData(slot, 1, 1, root),
Signature: sig.Marshal(),
}
}
// It is important to use 0 as the index because that's the only way
// pre and post-Electra attestations can both match,
// which allows us to properly test that attestations from the
// wrong fork are ignored.
committeeIndex := uint64(0)

createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.Attestation {
return &ethpbalpha.Attestation{
AggregationBits: aggregationBits,
Data: createAttestationData(slot, primitives.CommitteeIndex(committeeIndex), root),
Signature: sig.Marshal(),
}
}

createAttestationElectra := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.AttestationElectra {
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(committeeIndex, true)

return &ethpbalpha.AttestationElectra{
CommitteeBits: committeeBits,
AggregationBits: aggregationBits,
Data: createAttestationData(slot, primitives.CommitteeIndex(committeeIndex), root),
Signature: sig.Marshal(),
}
}

t.Run("V1", func(t *testing.T) {
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1)
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1)
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2)
@@ -84,7 +102,7 @@ func TestGetAggregateAttestation(t *testing.T) {
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
assert.Equal(t, "1", attestation.Data.CommitteeIndex, "Committee index mismatch")
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")

// Source checkpoint checks
@@ -206,181 +224,288 @@ func TestGetAggregateAttestation(t *testing.T) {
})
})
t.Run("V2", func(t *testing.T) {
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte, bits uint64) *ethpbalpha.AttestationElectra {
t.Run("pre-electra", func(t *testing.T) {
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(bits, true)
committeeBits.SetBitAt(1, true)

return &ethpbalpha.AttestationElectra{
CommitteeBits: committeeBits,
AggregationBits: aggregationBits,
Data: createAttestationData(slot, 0, 1, root),
Signature: sig.Marshal(),
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1)
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1)
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2)
aggSlot2 := createAttestation(2, bitfield.Bitlist{0b11100}, root1)
unaggSlot3_Root1_1 := createAttestation(3, bitfield.Bitlist{0b11000}, root1)
unaggSlot3_Root1_2 := createAttestation(3, bitfield.Bitlist{0b10100}, root1)
unaggSlot3_Root2 := createAttestation(3, bitfield.Bitlist{0b11000}, root2)
unaggSlot4 := createAttestation(4, bitfield.Bitlist{0b11000}, root1)

// Add one post-electra attestation to ensure that it is being ignored.
// We choose slot 2 where we have one pre-electra attestation with fewer attestation bits.
postElectraAtt := createAttestationElectra(2, bitfield.Bitlist{0b11111}, root1)

compareResult := func(
t *testing.T,
attestation structs.Attestation,
expectedSlot string,
expectedAggregationBits string,
expectedRoot []byte,
expectedSig []byte,
) {
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")

// Source checkpoint checks
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")

// Target checkpoint checks
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
}
}

aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1, 1)
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1, 1)
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2, 1)
aggSlot2 := createAttestation(2, bitfield.Bitlist{0b11100}, root1, 1)
unaggSlot3_Root1_1 := createAttestation(3, bitfield.Bitlist{0b11000}, root1, 1)
unaggSlot3_Root1_2 := createAttestation(3, bitfield.Bitlist{0b10100}, root1, 1)
unaggSlot3_Root2 := createAttestation(3, bitfield.Bitlist{0b11000}, root2, 1)
unaggSlot4 := createAttestation(4, bitfield.Bitlist{0b11000}, root1, 1)

compareResult := func(
t *testing.T,
attestation structs.AttestationElectra,
expectedSlot string,
expectedAggregationBits string,
expectedRoot []byte,
expectedSig []byte,
expectedCommitteeBits string,
) {
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
assert.Equal(t, expectedCommitteeBits, attestation.CommitteeBits)
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")

// Source checkpoint checks
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")

// Target checkpoint checks
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
}

pool := attestations.NewPool()
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
unagg, err := pool.UnaggregatedAttestations()
require.NoError(t, err)
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2}), "Failed to save aggregated attestations")
agg := pool.AggregatedAttestations()
require.Equal(t, 4, len(agg), "Expected 4 aggregated attestations")
bs, err := util.NewBeaconState()
require.NoError(t, err)
s := &Server{
ChainInfoFetcher: &mockChain.ChainService{State: bs},
AttestationsPool: pool,
}
t.Run("non-matching attestation request", func(t *testing.T) {
reqRoot, err := aggSlot2.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=1"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
})
t.Run("1 matching aggregated attestation", func(t *testing.T) {
reqRoot, err := aggSlot2.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=1"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")

var resp structs.AggregateAttestationResponse
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
require.NotNil(t, resp.Data, "Response data should not be nil")

var attestation structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")

compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot2.CommitteeBits))
})
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=1"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")

var resp structs.AggregateAttestationResponse
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
require.NotNil(t, resp.Data, "Response data should not be nil")

var attestation structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")

compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot1_Root1_1.CommitteeBits))
})
t.Run("1 matching unaggregated attestation", func(t *testing.T) {
reqRoot, err := unaggSlot4.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=4" + "&committee_index=1"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")

var resp structs.AggregateAttestationResponse
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
require.NotNil(t, resp.Data, "Response data should not be nil")

var attestation structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
compareResult(t, attestation, "4", hexutil.Encode(unaggSlot4.AggregationBits), root1, sig.Marshal(), hexutil.Encode(unaggSlot4.CommitteeBits))
})
t.Run("multiple matching unaggregated attestations - their aggregate is returned", func(t *testing.T) {
reqRoot, err := unaggSlot3_Root1_1.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3" + "&committee_index=1"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")

var resp structs.AggregateAttestationResponse
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
require.NotNil(t, resp.Data, "Response data should not be nil")

var attestation structs.AttestationElectra
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
sig1, err := bls.SignatureFromBytes(unaggSlot3_Root1_1.Signature)
pool := attestations.NewPool()
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
unagg, err := pool.UnaggregatedAttestations()
require.NoError(t, err)
sig2, err := bls.SignatureFromBytes(unaggSlot3_Root1_2.Signature)
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2, postElectraAtt}), "Failed to save aggregated attestations")
agg := pool.AggregatedAttestations()
require.Equal(t, 5, len(agg), "Expected 5 aggregated attestations, 4 pre electra and 1 post electra")
s := &Server{
AttestationsPool: pool,
}

t.Run("non-matching attestation request", func(t *testing.T) {
reqRoot, err := aggSlot2.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
})
t.Run("1 matching aggregated attestation", func(t *testing.T) {
reqRoot, err := aggSlot2.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=0"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")

var resp structs.AggregateAttestationResponse
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
require.NotNil(t, resp.Data, "Response data should not be nil")

var attestation structs.Attestation
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")

compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal())
})
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
require.NoError(t, err, "Failed to generate attestation data hash tree root")
attDataRoot := hexutil.Encode(reqRoot[:])
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()

s.GetAggregateAttestationV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")

var resp structs.AggregateAttestationResponse
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
require.NotNil(t, resp.Data, "Response data should not be nil")

var attestation structs.Attestation
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")

compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal())
})
})
t.Run("post-electra", func(t *testing.T) {
|
||||
aggSlot1_Root1_1 := createAttestationElectra(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestationElectra(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestationElectra(1, bitfield.Bitlist{0b11100}, root2)
|
||||
aggSlot2 := createAttestationElectra(2, bitfield.Bitlist{0b11100}, root1)
|
||||
unaggSlot3_Root1_1 := createAttestationElectra(3, bitfield.Bitlist{0b11000}, root1)
|
||||
unaggSlot3_Root1_2 := createAttestationElectra(3, bitfield.Bitlist{0b10100}, root1)
|
||||
unaggSlot3_Root2 := createAttestationElectra(3, bitfield.Bitlist{0b11000}, root2)
|
||||
unaggSlot4 := createAttestationElectra(4, bitfield.Bitlist{0b11000}, root1)
|
||||
|
||||
// Add one pre-electra attestation to ensure that it is being ignored.
|
||||
// We choose slot 2 where we have one post-electra attestation with less attestation bits.
|
||||
preElectraAtt := createAttestation(2, bitfield.Bitlist{0b11111}, root1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.AttestationElectra,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
expectedCommitteeBits string,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, expectedCommitteeBits, attestation.CommitteeBits)
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
expectedSig := bls.AggregateSignatures([]common.Signature{sig1, sig2})
|
||||
compareResult(t, attestation, "3", hexutil.Encode(bitfield.Bitlist{0b11100}), root1, expectedSig.Marshal(), hexutil.Encode(unaggSlot3_Root1_1.CommitteeBits))
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2, preElectraAtt}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 5, len(agg), "Expected 5 aggregated attestations, 4 electra and 1 pre electra")
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &mockChain.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot2.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot1_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("1 matching unaggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot4.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=4" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
compareResult(t, attestation, "4", hexutil.Encode(unaggSlot4.AggregationBits), root1, sig.Marshal(), hexutil.Encode(unaggSlot4.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching unaggregated attestations - their aggregate is returned", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot3_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
sig1, err := bls.SignatureFromBytes(unaggSlot3_Root1_1.Signature)
|
||||
require.NoError(t, err)
|
||||
sig2, err := bls.SignatureFromBytes(unaggSlot3_Root1_2.Signature)
|
||||
require.NoError(t, err)
|
||||
expectedSig := bls.AggregateSignatures([]common.Signature{sig1, sig2})
|
||||
compareResult(t, attestation, "3", hexutil.Encode(bitfield.Bitlist{0b11100}), root1, expectedSig.Marshal(), hexutil.Encode(unaggSlot3_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("pre-electra attestation is ignored", func(t *testing.T) {
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
}
func createAttestationData(
slot primitives.Slot,
committeeIndex primitives.CommitteeIndex,
epoch primitives.Epoch,
root []byte,
) *ethpbalpha.AttestationData {
func createAttestationData(slot primitives.Slot, committeeIndex primitives.CommitteeIndex, root []byte) *ethpbalpha.AttestationData {
return &ethpbalpha.AttestationData{
Slot: slot,
CommitteeIndex: committeeIndex,
BeaconBlockRoot: root,
Source: &ethpbalpha.Checkpoint{
Epoch: epoch,
Epoch: 1,
Root: root,
},
Target: &ethpbalpha.Checkpoint{
Epoch: epoch,
Epoch: 1,
Root: root,
},
}
31 beacon-chain/rpc/metrics.go Normal file
@@ -0,0 +1,31 @@
package rpc

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
httpRequestLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "http_request_latency_seconds",
Help: "Latency of HTTP requests in seconds",
Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
},
[]string{"endpoint", "code", "method"},
)
httpRequestCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "http_request_count",
Help: "Number of HTTP requests",
},
[]string{"endpoint", "code", "method"},
)
httpErrorCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "http_error_count",
Help: "Total HTTP errors for beacon node requests",
},
[]string{"endpoint", "code", "method"},
)
)
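These collectors are package-level, so any handler registered in the rpc package can record into them. A self-contained sketch of the kind of middleware that could feed such metrics; the wiring below is illustrative and not Prysm's actual instrumentation, only the Prometheus client APIs are real:

```go
package main

import (
	"net/http"
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Illustrative middleware metrics; mirrors the names above but is not Prysm's code.
var (
	requestLatency = promauto.NewHistogramVec(
		prometheus.HistogramOpts{Name: "http_request_latency_seconds", Help: "Latency of HTTP requests in seconds"},
		[]string{"endpoint", "code", "method"},
	)
	errorCount = promauto.NewCounterVec(
		prometheus.CounterOpts{Name: "http_error_count", Help: "Total HTTP errors"},
		[]string{"endpoint", "code", "method"},
	)
)

// statusRecorder captures the status code written by the wrapped handler.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// instrument wraps a handler, observing latency for every request and
// counting 4xx/5xx responses per endpoint, code, and method.
func instrument(endpoint string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		start := time.Now()
		next.ServeHTTP(rec, r)
		code := strconv.Itoa(rec.status)
		requestLatency.WithLabelValues(endpoint, code, r.Method).Observe(time.Since(start).Seconds())
		if rec.status >= 400 {
			errorCount.WithLabelValues(endpoint, code, r.Method).Inc()
		}
	})
}

func main() {
	http.Handle("/healthz", instrument("/healthz", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "not ready", http.StatusServiceUnavailable) // counted as an error
	})))
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}
```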
@@ -25,8 +25,8 @@ import (

type testIdentity enode.ID

func (_ testIdentity) Verify(_ *enr.Record, _ []byte) error { return nil }
func (id testIdentity) NodeAddr(_ *enr.Record) []byte { return id[:] }
func (testIdentity) Verify(*enr.Record, []byte) error { return nil }
func (id testIdentity) NodeAddr(*enr.Record) []byte { return id[:] }

func TestListTrustedPeer(t *testing.T) {
ids := libp2ptest.GeneratePeerIDs(9)
@@ -62,13 +62,13 @@ func TestListTrustedPeer(t *testing.T) {

switch i {
case 0, 1:
peerStatus.SetConnectionState(id, peers.PeerConnecting)
peerStatus.SetConnectionState(id, peers.Connecting)
case 2, 3:
peerStatus.SetConnectionState(id, peers.PeerConnected)
peerStatus.SetConnectionState(id, peers.Connected)
case 4, 5:
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
peerStatus.SetConnectionState(id, peers.Disconnecting)
case 6, 7:
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
peerStatus.SetConnectionState(id, peers.Disconnected)
default:
t.Fatalf("Failed to set connection state for peer")
}

@@ -303,7 +303,7 @@ func (bs *Server) ListIndexedAttestationsElectra(
// that it was included in a block. The attestation may have expired.
// Refer to the ethereum consensus specification for more details on how
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolResponse, error) {
atts, err := attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
if err != nil {

@@ -212,7 +212,9 @@ go_test(
embed = [":go_default_library"],
eth_network = "minimal",
tags = ["minimal"],
deps = common_deps,
deps = common_deps + [
"//beacon-chain/operations/attestations/mock:go_default_library",
],
)

go_test(

@@ -91,14 +91,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon

var attsForInclusion proposerAtts
if postElectra {
// TODO: hack for Electra devnet-1, take only one aggregate per ID
// (which essentially means one aggregate for an attestation_data+committee combination
topAggregates := make([]ethpb.Att, 0)
for _, v := range attsById {
topAggregates = append(topAggregates, v[0])
}

attsForInclusion, err = computeOnChainAggregate(topAggregates)
attsForInclusion, err = onChainAggregates(attsById)
if err != nil {
return nil, err
}
@@ -113,14 +106,68 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
if err != nil {
return nil, err
}
sorted, err := deduped.sort()
if err != nil {
return nil, err

var sorted proposerAtts
if postElectra {
sorted, err = deduped.sortOnChainAggregates()
if err != nil {
return nil, err
}
} else {
sorted, err = deduped.sort()
if err != nil {
return nil, err
}
}

atts = sorted.limitToMaxAttestations()
return vs.filterAttestationBySignature(ctx, atts, latestState)
}

func onChainAggregates(attsById map[attestation.Id][]ethpb.Att) (proposerAtts, error) {
var result proposerAtts
var err error

// When constructing on-chain aggregates, we want to combine the most profitable
// aggregate for each ID, then the second most profitable, and so on and so forth.
// Because of this we sort attestations at the beginning.
for id, as := range attsById {
attsById[id], err = proposerAtts(as).sort()
if err != nil {
return nil, err
}
}

// We construct the first on-chain aggregate by taking the first aggregate for each ID.
// We construct the second on-chain aggregate by taking the second aggregate for each ID.
// We continue doing this until we run out of aggregates.
idx := 0
for {
topAggregates := make([]ethpb.Att, 0, len(attsById))
for _, as := range attsById {
// In case there are no more aggregates for an ID, we skip that ID.
if len(as) > idx {
topAggregates = append(topAggregates, as[idx])
}
}

// Once there are no more aggregates for any ID, we are done.
if len(topAggregates) == 0 {
break
}

onChainAggs, err := computeOnChainAggregate(topAggregates)
if err != nil {
return nil, err
}
result = append(result, onChainAggs...)

idx++
}

return result, nil
}
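The loop above is a round-robin over the per-ID lists: round i collects the i-th best aggregate from every ID that still has one, and each round is folded into on-chain aggregates. A self-contained toy of the same selection order, where plain ints stand in for aggregates and higher means more profitable:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Aggregates grouped by ID; the values are toy profitability scores.
	byID := map[string][]int{
		"a": {3, 9, 5},
		"b": {7},
		"c": {2, 8},
	}
	// Sort each group best-first, mirroring the initial per-ID sort.
	for id := range byID {
		sort.Sort(sort.Reverse(sort.IntSlice(byID[id])))
	}
	// Round idx takes the idx-th entry from every group that still has one.
	var rounds [][]int
	for idx := 0; ; idx++ {
		var round []int
		for _, id := range []string{"a", "b", "c"} { // fixed order for a stable demo
			if as := byID[id]; len(as) > idx {
				round = append(round, as[idx])
			}
		}
		if len(round) == 0 {
			break // no group has an idx-th aggregate left
		}
		rounds = append(rounds, round)
	}
	fmt.Println(rounds) // [[9 7 8] [5 2] [3]]
}
```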
// filter separates attestation list into two groups: valid and invalid attestations.
// The first group passes all the required checks for attestation to be considered for proposing.
// And attestations from the second group should be deleted.
@@ -223,6 +270,14 @@ func (a proposerAtts) sort() (proposerAtts, error) {
return a.sortBySlotAndCommittee()
}

func (a proposerAtts) sortOnChainAggregates() (proposerAtts, error) {
if len(a) < 2 {
return a, nil
}

return a.sortByProfitabilityUsingMaxCover()
}

// Separate attestations by slot, as slot number takes higher precedence when sorting.
// Also separate by committee index because maxcover will prefer attestations for the same
// committee with disjoint bits over attestations for different committees with overlapping
@@ -231,7 +286,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
type slotAtts struct {
candidates map[primitives.CommitteeIndex]proposerAtts
selected map[primitives.CommitteeIndex]proposerAtts
leftover map[primitives.CommitteeIndex]proposerAtts
}

var slots []primitives.Slot
@@ -250,7 +304,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
var err error
for _, sa := range attsBySlot {
sa.selected = make(map[primitives.CommitteeIndex]proposerAtts)
sa.leftover = make(map[primitives.CommitteeIndex]proposerAtts)
for ci, committeeAtts := range sa.candidates {
sa.selected[ci], err = committeeAtts.sortByProfitabilityUsingMaxCover_committeeAwarePacking()
if err != nil {
@@ -266,9 +319,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
for _, slot := range slots {
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].selected)...)
}
for _, slot := range slots {
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].leftover)...)
}

return sortedAtts, nil
}
@@ -287,15 +337,11 @@ func (a proposerAtts) sortByProfitabilityUsingMaxCover_committeeAwarePacking() (
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Add selected candidates on top, those that are not selected - append at bottom.
|
||||
selectedKeys, _, err := aggregation.MaxCover(candidates, len(candidates), true /* allowOverlaps */)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("MaxCover aggregation failed")
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Pick selected attestations first, leftover attestations will be appended at the end.
|
||||
// Both lists will be sorted by number of bits set.
|
||||
selected := make(proposerAtts, selectedKeys.Count())
|
||||
for i, key := range selectedKeys.BitIndices() {
|
||||
selected[i] = a[key]
|
||||
|
||||
@@ -13,6 +13,9 @@ import (
|
||||
// computeOnChainAggregate constructs a final aggregate form a list of network aggregates with equal attestation data.
|
||||
// It assumes that each network aggregate has exactly one committee bit set.
|
||||
//
|
||||
// Our implementation allows to pass aggregates for different attestation data, in which case the function will return
|
||||
// one final aggregate per attestation data.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:
|
||||
|
||||
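As a concrete illustration of the layout such a final aggregate carries, the sketch below (an assumption-laden illustration, not the Prysm implementation; it uses go-bitfield's Bitlist API and 6 validators per committee, matching the test that follows) concatenates the aggregation bits of two single-committee aggregates in committee-index order; in the real on-chain aggregate, committee bits 0 and 1 would also be set.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Aggregation bits of two single-committee aggregates, 6 validators each.
	c0 := bitfield.NewBitlist(6)
	c0.SetBitAt(0, true) // first validator of committee 0 participated
	c1 := bitfield.NewBitlist(6)
	c1.SetBitAt(5, true) // last validator of committee 1 participated

	// Concatenate the bits in committee-index order.
	combined := bitfield.NewBitlist(c0.Len() + c1.Len())
	offset := uint64(0)
	for _, bits := range []bitfield.Bitlist{c0, c1} {
		for i := uint64(0); i < bits.Len(); i++ {
			combined.SetBitAt(offset+i, bits.BitAt(i))
		}
		offset += bits.Len()
	}
	fmt.Println(combined.Count()) // 2: one participant per committee
}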
@@ -3,16 +3,21 @@ package validator

import (
	"bytes"
	"context"
	"math/rand"
	"sort"
	"strconv"
	"testing"

	"github.com/prysmaticlabs/go-bitfield"
	chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -680,6 +685,212 @@ func Test_packAttestations(t *testing.T) {
	})
}

func Test_packAttestations_ElectraOnChainAggregates(t *testing.T) {
	ctx := context.Background()

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.ElectraForkEpoch = 1
	params.OverrideBeaconConfig(cfg)

	key, err := blst.RandKey()
	require.NoError(t, err)
	sig := key.Sign([]byte{'X'})

	cb0 := primitives.NewAttestationCommitteeBits()
	cb0.SetBitAt(0, true)
	cb1 := primitives.NewAttestationCommitteeBits()
	cb1.SetBitAt(1, true)

	data0 := util.HydrateAttestationData(&ethpb.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte{'0'}, 32)})
	data1 := util.HydrateAttestationData(&ethpb.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte{'1'}, 32)})

	// Glossary:
	// - Single Aggregate: aggregate with exactly one committee bit set, from which an On-Chain Aggregate is constructed
	// - On-Chain Aggregate: final aggregate packed into a block
	//
	// We construct the following number of single aggregates:
	// - data_root_0 and committee_index_0: 3 single aggregates
	// - data_root_0 and committee_index_1: 2 single aggregates
	// - data_root_1 and committee_index_0: 1 single aggregate
	// - data_root_1 and committee_index_1: 3 single aggregates
	//
	// Because the function tries to aggregate attestations, we have to create attestations which are not aggregatable
	// and are not redundant when using MaxCover.
	// The function should also sort attestations by ID before computing the On-Chain Aggregate, so we want unsorted aggregation bits
	// to test the sorting part.
	//
	// The result should be the following six on-chain aggregates:
	// - for data_root_0 combining the most profitable aggregate for each committee
	// - for data_root_0 combining the second most profitable aggregate for each committee
	// - for data_root_0 constructed from the single aggregate at index 2 for committee_index_0
	// - for data_root_1 combining the most profitable aggregate for each committee
	// - for data_root_1 constructed from the single aggregate at index 1 for committee_index_1
	// - for data_root_1 constructed from the single aggregate at index 2 for committee_index_1

	d0_c0_a1 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1000011},
		CommitteeBits:   cb0,
		Data:            data0,
		Signature:       sig.Marshal(),
	}
	d0_c0_a2 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1100101},
		CommitteeBits:   cb0,
		Data:            data0,
		Signature:       sig.Marshal(),
	}
	d0_c0_a3 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1111000},
		CommitteeBits:   cb0,
		Data:            data0,
		Signature:       sig.Marshal(),
	}
	d0_c1_a1 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1111100},
		CommitteeBits:   cb1,
		Data:            data0,
		Signature:       sig.Marshal(),
	}
	d0_c1_a2 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1001111},
		CommitteeBits:   cb1,
		Data:            data0,
		Signature:       sig.Marshal(),
	}
	d1_c0_a1 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1111111},
		CommitteeBits:   cb0,
		Data:            data1,
		Signature:       sig.Marshal(),
	}
	d1_c1_a1 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1000011},
		CommitteeBits:   cb1,
		Data:            data1,
		Signature:       sig.Marshal(),
	}
	d1_c1_a2 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1100101},
		CommitteeBits:   cb1,
		Data:            data1,
		Signature:       sig.Marshal(),
	}
	d1_c1_a3 := &ethpb.AttestationElectra{
		AggregationBits: bitfield.Bitlist{0b1111000},
		CommitteeBits:   cb1,
		Data:            data1,
		Signature:       sig.Marshal(),
	}

	pool := &mock.PoolMock{}
	require.NoError(t, pool.SaveAggregatedAttestations([]ethpb.Att{d0_c0_a1, d0_c0_a2, d0_c0_a3, d0_c1_a1, d0_c1_a2, d1_c0_a1, d1_c1_a1, d1_c1_a2, d1_c1_a3}))
	slot := primitives.Slot(1)
	s := &Server{AttPool: pool, HeadFetcher: &chainMock.ChainService{}, TimeFetcher: &chainMock.ChainService{Slot: &slot}}

	// We need the correct number of validators so that there are at least 2 committees per slot
	// and each committee has exactly 6 validators (this is because we have 6 aggregation bits).
	st, _ := util.DeterministicGenesisStateElectra(t, 192)

	require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))

	atts, err := s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch)
	require.NoError(t, err)
	require.Equal(t, 6, len(atts))
	assert.Equal(t, true,
		atts[0].GetAggregationBits().Count() >= atts[1].GetAggregationBits().Count() &&
			atts[1].GetAggregationBits().Count() >= atts[2].GetAggregationBits().Count() &&
			atts[2].GetAggregationBits().Count() >= atts[3].GetAggregationBits().Count() &&
			atts[3].GetAggregationBits().Count() >= atts[4].GetAggregationBits().Count() &&
			atts[4].GetAggregationBits().Count() >= atts[5].GetAggregationBits().Count(),
		"on-chain aggregates are not sorted by aggregation bit count",
	)

	t.Run("slot takes precedence", func(t *testing.T) {
		moreRecentAtt := &ethpb.AttestationElectra{
			AggregationBits: bitfield.Bitlist{0b1100000}, // we set only one bit for committee_index_0
			CommitteeBits:   cb1,
			Data:            util.HydrateAttestationData(&ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.PadTo([]byte{'0'}, 32)}),
			Signature:       sig.Marshal(),
		}
		require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpb.Att{moreRecentAtt}))
		atts, err = s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch)
		require.NoError(t, err)
		require.Equal(t, 7, len(atts))
		assert.Equal(t, true, atts[0].GetData().Slot == 1)
	})
}

func Benchmark_packAttestations_Electra(b *testing.B) {
	ctx := context.Background()

	params.SetupTestConfigCleanup(b)
	cfg := params.MainnetConfig().Copy()
	cfg.ElectraForkEpoch = 1
	params.OverrideBeaconConfig(cfg)

	valCount := uint64(1048576)
	committeeCount := helpers.SlotCommitteeCount(valCount)
	valsPerCommittee := valCount / committeeCount / uint64(params.BeaconConfig().SlotsPerEpoch)

	st, _ := util.DeterministicGenesisStateElectra(b, valCount)

	key, err := blst.RandKey()
	require.NoError(b, err)
	sig := key.Sign([]byte{'X'})

	r := rand.New(rand.NewSource(123))

	var atts []ethpb.Att
	for c := uint64(0); c < committeeCount; c++ {
		for a := uint64(0); a < params.BeaconConfig().TargetAggregatorsPerCommittee; a++ {
			cb := primitives.NewAttestationCommitteeBits()
			cb.SetBitAt(c, true)

			var att *ethpb.AttestationElectra
			// Last two aggregators send aggregates for some random block root with only a few bits set.
			if a >= params.BeaconConfig().TargetAggregatorsPerCommittee-2 {
				root := bytesutil.PadTo([]byte("root_"+strconv.Itoa(r.Intn(100))), 32)
				att = &ethpb.AttestationElectra{
					Data:            util.HydrateAttestationData(&ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch - 1, BeaconBlockRoot: root}),
					AggregationBits: bitfield.NewBitlist(valsPerCommittee),
					CommitteeBits:   cb,
					Signature:       sig.Marshal(),
				}
				for bit := uint64(0); bit < valsPerCommittee; bit++ {
					att.AggregationBits.SetBitAt(bit, r.Intn(100) < 2) // 2% chance that the bit is set
				}
			} else {
				att = &ethpb.AttestationElectra{
					Data:            util.HydrateAttestationData(&ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch - 1, BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32)}),
					AggregationBits: bitfield.NewBitlist(valsPerCommittee),
					CommitteeBits:   cb,
					Signature:       sig.Marshal(),
				}
				for bit := uint64(0); bit < valsPerCommittee; bit++ {
					att.AggregationBits.SetBitAt(bit, r.Intn(100) < 98) // 98% chance that the bit is set
				}
			}

			atts = append(atts, att)
		}
	}

	pool := &mock.PoolMock{}
	require.NoError(b, pool.SaveAggregatedAttestations(atts))

	slot := primitives.Slot(1)
	s := &Server{AttPool: pool, HeadFetcher: &chainMock.ChainService{}, TimeFetcher: &chainMock.ChainService{Slot: &slot}}

	require.NoError(b, st.SetSlot(params.BeaconConfig().SlotsPerEpoch))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err = s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch+1)
		require.NoError(b, err)
	}
}

func Test_limitToMaxAttestations(t *testing.T) {
	t.Run("Phase 0", func(t *testing.T) {
		atts := make([]ethpb.Att, params.BeaconConfig().MaxAttestations+1)

@@ -14,8 +14,6 @@ import (
	grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
@@ -57,24 +55,6 @@ import (

const attestationBufferSize = 100

var (
	httpRequestLatency = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_latency_seconds",
			Help:    "Latency of HTTP requests in seconds",
			Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
		},
		[]string{"endpoint", "code", "method"},
	)
	httpRequestCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_request_count",
			Help: "Number of HTTP requests",
		},
		[]string{"endpoint", "code", "method"},
	)
)
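The hunk counts above (24 lines shrinking to 6, with the prometheus and promauto imports dropping out) indicate these collectors are being removed from this file. For reference, a labeled vector like httpRequestCount is recorded by selecting a label combination; a minimal sketch with hypothetical label values:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var requestCount = promauto.NewCounterVec(
	prometheus.CounterOpts{Name: "http_request_count", Help: "Number of HTTP requests"},
	[]string{"endpoint", "code", "method"},
)

func main() {
	// Each distinct label combination gets its own time series.
	requestCount.WithLabelValues("/eth/v1/node/health", "200", "GET").Inc()
}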

// Service defining an RPC server for a beacon node.
type Service struct {
	cfg *Config

@@ -10,6 +10,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/checkpoint",
    visibility = ["//visibility:public"],
    deps = [
        "//api/client:go_default_library",
        "//api/client/beacon:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//config/params:go_default_library",

@@ -4,11 +4,14 @@ import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/api/client"
	"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

const stateSizeLimit int64 = 1 << 29 // 512MB
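For scale: 1 << 29 bytes is 536,870,912 bytes, i.e. 512 MiB. The checkpoint download command further down in this diff declares the same limit.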

// APIInitializer manages initializing the beacon node using checkpoint sync, retrieving the checkpoint state and root
// from the remote beacon node api.
type APIInitializer struct {
@@ -18,7 +21,7 @@ type APIInitializer struct {
// NewAPIInitializer creates an APIInitializer, handling the set up of a beacon node api client
// using the provided host string.
func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
	c, err := beacon.NewClient(beaconNodeHost)
	c, err := beacon.NewClient(beaconNodeHost, client.WithMaxBodySize(stateSizeLimit))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to parse beacon node url or hostname - %s", beaconNodeHost)
	}
@@ -32,10 +35,9 @@ func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
	if err == nil && origin != params.BeaconConfig().ZeroHash {
		log.Warnf("Origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
		return nil
	} else {
		if !errors.Is(err, db.ErrNotFound) {
			return errors.Wrap(err, "error while checking database for origin root")
		}
	}
	if err != nil && !errors.Is(err, db.ErrNotFound) {
		return errors.Wrap(err, "error while checking database for origin root")
	}
	od, err := beacon.DownloadFinalizedData(ctx, dl.c)
	if err != nil {

@@ -1094,7 +1094,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
	// its claims with actual blocks.
	emptyPeer := connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers())
	defer func() {
		p2p.Peers().SetConnectionState(emptyPeer, peers.PeerDisconnected)
		p2p.Peers().SetConnectionState(emptyPeer, peers.Disconnected)
	}()
	chainState, err := p2p.Peers().ChainState(emptyPeer)
	require.NoError(t, err)
@@ -1291,7 +1291,7 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
	// Connect peer that has all the blocks available.
	allBlocksPeer := connectPeerHavingBlocks(t, p2p, chain, finalizedSlot, p2p.Peers())
	defer func() {
		p2p.Peers().SetConnectionState(allBlocksPeer, peers.PeerDisconnected)
		p2p.Peers().SetConnectionState(allBlocksPeer, peers.Disconnected)
	}()

	// Queue should be able to fetch whole chain (including slot which comes before the currently set head).

@@ -227,7 +227,7 @@ func connectPeer(t *testing.T, host *p2pt.TestP2P, datum *peerData, peerStatus *
	p.Connect(host)

	peerStatus.Add(new(enr.Record), p.PeerID(), nil, network.DirOutbound)
	peerStatus.SetConnectionState(p.PeerID(), peers.PeerConnected)
	peerStatus.SetConnectionState(p.PeerID(), peers.Connected)
	peerStatus.SetChainState(p.PeerID(), &ethpb.Status{
		ForkDigest:    params.BeaconConfig().GenesisForkVersion,
		FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)),
@@ -326,7 +326,7 @@ func connectPeerHavingBlocks(
	require.NoError(t, err)

	peerStatus.Add(new(enr.Record), p.PeerID(), nil, network.DirOutbound)
	peerStatus.SetConnectionState(p.PeerID(), peers.PeerConnected)
	peerStatus.SetConnectionState(p.PeerID(), peers.Connected)
	peerStatus.SetChainState(p.PeerID(), &ethpb.Status{
		ForkDigest:    params.BeaconConfig().GenesisForkVersion,
		FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),

@@ -40,7 +40,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
	p1.Connect(p2)
	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
	p1.Peers().SetChainState(p2.PeerID(), &ethpb.Status{})

	chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{}}

@@ -406,7 +406,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
	r.initCaches()

	p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
	p1.Peers().SetChainState(p2.PeerID(), &ethpb.Status{})

	b0 := util.NewBeaconBlock()
@@ -505,7 +505,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
	r.initCaches()

	p1.Peers().Add(new(enr.Record), p1.PeerID(), nil, network.DirOutbound)
	p1.Peers().SetConnectionState(p1.PeerID(), peers.PeerConnected)
	p1.Peers().SetConnectionState(p1.PeerID(), peers.Connected)
	p1.Peers().SetChainState(p1.PeerID(), &ethpb.Status{})

	b0 := util.NewBeaconBlock()
@@ -611,7 +611,7 @@ func TestService_BatchRootRequest(t *testing.T) {
	r.initCaches()

	p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
	p1.Peers().SetChainState(p2.PeerID(), &ethpb.Status{FinalizedEpoch: 2})

	b0 := util.NewBeaconBlock()

@@ -7,12 +7,13 @@ import (

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/trailofbits/go-mutexasserts"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
	p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
	"github.com/sirupsen/logrus"
	"github.com/trailofbits/go-mutexasserts"
)

const defaultBurstLimit = 5
@@ -98,19 +99,20 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
	defer l.RUnlock()

	topic := string(stream.Protocol())
	remotePeer := stream.Conn().RemotePeer()

	collector, err := l.retrieveCollector(topic)
	if err != nil {
		return err
	}
	key := stream.Conn().RemotePeer().String()
	remaining := collector.Remaining(key)

	remaining := collector.Remaining(remotePeer.String())
	// Treat each request as a minimum of 1.
	if amt == 0 {
		amt = 1
	}
	if amt > uint64(remaining) {
		l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
		l.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
		writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
		return p2ptypes.ErrRateLimited
	}

@@ -97,7 +97,7 @@ func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
	for i := 0; i < defaultBurstLimit; i++ {
		assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream))
	}
	assert.Equal(t, true, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
	assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
	require.NoError(t, stream.Close(), "could not close stream")

	if util.WaitTimeout(&wg, 1*time.Second) {

@@ -51,6 +51,7 @@ func (s *Service) registerRPCHandlers() {
		s.pingHandler,
	)
	s.registerRPCHandlersAltair()

	if currEpoch >= params.BeaconConfig().DenebForkEpoch {
		s.registerRPCHandlersDeneb()
	}
@@ -138,6 +139,9 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
		ctx, cancel := context.WithTimeout(s.ctx, ttfbTimeout)
		defer cancel()

		conn := stream.Conn()
		remotePeer := conn.RemotePeer()

		// Resetting after closing is a no-op so defer a reset in case something goes wrong.
		// It's up to the handler to Close the stream (send an EOF) if
		// it successfully writes a response. We don't blindly call
@@ -157,12 +161,12 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
		ctx, span := trace.StartSpan(ctx, "sync.rpc")
		defer span.End()
		span.SetAttributes(trace.StringAttribute("topic", topic))
		span.SetAttributes(trace.StringAttribute("peer", stream.Conn().RemotePeer().String()))
		span.SetAttributes(trace.StringAttribute("peer", remotePeer.String()))
		log := log.WithField("peer", stream.Conn().RemotePeer().String()).WithField("topic", string(stream.Protocol()))

		// Check beforehand that the peer is valid.
		if s.cfg.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
			if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
		if err := s.cfg.p2p.Peers().IsBad(remotePeer); err != nil {
			if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, remotePeer); err != nil {
				log.WithError(err).Debug("Could not disconnect from peer")
			}
			return

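The hunks above and below show the shape of the IsBad refactor on this branch: the check previously answered with a bool, and now returns an error explaining why the peer is bad, which call sites thread through to disconnectBadPeer so the reason can be logged. A minimal sketch of the two calling styles (stub function, not the Prysm API):

package main

import (
	"errors"
	"fmt"
)

// isBad stands in for the refactored scorer check: nil means the peer is fine,
// a non-nil error carries the reason the peer is considered bad.
func isBad(id string) error {
	if id == "bad-peer" {
		return errors.New("bad responses score above threshold")
	}
	return nil
}

func main() {
	// Old style: if isBad(id) { disconnect(id) } loses the reason.
	// New style: the error doubles as the disconnection reason in the log.
	if err := isBad("bad-peer"); err != nil {
		fmt.Printf("Initiate peer disconnection: peer=%s reason=%v\n", "bad-peer", err)
	}
}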
@@ -395,7 +395,7 @@ func TestRequestPendingBlobs(t *testing.T) {
		Genesis: time.Now(),
	}
	p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
	p1.Peers().SetChainState(p2.PeerID(), &ethpb.Status{FinalizedEpoch: 1})
	s := &Service{
		cfg: &config{

@@ -55,10 +55,7 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l

// disconnectBadPeer checks whether a peer is considered bad by some scorer and, if so, tries to
// disconnect it. The disconnection reason is obtained from the scorer.
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
	if !s.cfg.p2p.Peers().IsBad(id) {
		return
	}
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID, badPeerErr error) {
	err := s.cfg.p2p.Peers().Scorers().ValidationError(id)
	goodbyeCode := p2ptypes.ErrToGoodbyeCode(err)
	if err == nil {
@@ -67,6 +64,8 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
	if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
		log.WithError(err).Debug("Error when disconnecting with bad peer")
	}

	log.WithError(badPeerErr).WithField("peerID", id).Debug("Initiate peer disconnection")
}

// A custom goodbye method that is used by our connection handler, in the

@@ -25,7 +25,7 @@ import (
	"github.com/sirupsen/logrus"
)

// maintainPeerStatuses by infrequently polling peers for their latest status.
// maintainPeerStatuses maintains peer statuses by polling peers for their latest status twice per epoch.
func (s *Service) maintainPeerStatuses() {
	// Run twice per epoch.
	interval := time.Duration(params.BeaconConfig().SlotsPerEpoch.Div(2).Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
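With mainnet parameters (32 slots per epoch, 12 seconds per slot), the interval works out to 32/2 * 12 = 192 seconds, so the loop fires twice per 384-second epoch, matching the updated comment.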
@@ -38,16 +38,20 @@ func (s *Service) maintainPeerStatuses() {
		// If our peer status has not been updated correctly we disconnect over here
		// and set the connection state over here instead.
		if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected {
			s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnecting)
			s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnecting)
			if err := s.cfg.p2p.Disconnect(id); err != nil {
				log.WithError(err).Debug("Error when disconnecting with peer")
			}
			s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
			s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnected)
			log.WithFields(logrus.Fields{
				"peer":   id,
				"reason": "maintain peer statuses - peer is not connected",
			}).Debug("Initiate peer disconnection")
			return
		}
		// Disconnect from peers that are considered bad by any of the registered scorers.
		if s.cfg.p2p.Peers().IsBad(id) {
			s.disconnectBadPeer(s.ctx, id)
		if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
			s.disconnectBadPeer(s.ctx, id, err)
			return
		}
		// If the status hasn't been updated in the recent interval time.
@@ -73,6 +77,11 @@ func (s *Service) maintainPeerStatuses() {
		if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeTooManyPeers, id); err != nil {
			log.WithField("peer", id).WithError(err).Debug("Could not disconnect with peer")
		}

		log.WithFields(logrus.Fields{
			"peer":   id,
			"reason": "to be pruned",
		}).Debug("Initiate peer disconnection")
		}
	})
}
@@ -169,8 +178,8 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
	// If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
	err = s.validateStatusMessage(ctx, msg)
	s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
	if s.cfg.p2p.Peers().IsBad(id) {
		s.disconnectBadPeer(s.ctx, id)
	if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
		s.disconnectBadPeer(s.ctx, id, err)
	}
	return err
}
@@ -182,7 +191,7 @@ func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
	}
	// Do not return an error for ping requests.
	if err := s.sendPingRequest(ctx, id); err != nil && !isUnwantedError(err) {
		log.WithError(err).Debug("Could not ping peer")
		log.WithError(err).WithField("pid", id).Debug("Could not ping peer")
	}
	return nil
}

@@ -413,7 +413,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
	assert.Equal(t, numActive1+1, numActive2, "Number of active peers unexpected")

	require.NoError(t, p2.Disconnect(p1.PeerID()))
	p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerDisconnected)
	p1.Peers().SetConnectionState(p2.PeerID(), peers.Disconnected)

	// Wait for disconnect event to trigger.
	time.Sleep(200 * time.Millisecond)
@@ -877,7 +877,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

	require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))

	assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
	assert.NoError(t, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
	p1.Connect(p2)

	if util.WaitTimeout(&wg, time.Second) {
@@ -887,9 +887,9 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

	connectionState, err := p1.Peers().ConnectionState(p2.PeerID())
	require.NoError(t, err, "Could not obtain peer connection state")
	assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")
	assert.Equal(t, peers.Disconnected, connectionState, "Expected peer to be disconnected")

	assert.Equal(t, true, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
	assert.NotNil(t, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
}

func TestStatusRPC_ValidGenesisMessage(t *testing.T) {

@@ -67,9 +67,11 @@ var InitsyncBlobSidecarRequirements = requirementList(GossipBlobSidecarRequireme
	RequireSidecarProposerExpected,
)

// ELMemPoolRequirements is a list of verification requirements to be used when importing blobs and proof from
// execution layer mempool. Only the KZG proof verification is required.
var ELMemPoolRequirements = []Requirement{RequireSidecarKzgProofVerified}
// ELMemPoolRequirements defines the verification requirements for importing blobs and proofs
// from the execution layer's mempool. Currently, no requirements are enforced because it is
// assumed that blobs and proofs from the execution layer are correctly formatted,
// given the trusted relationship between the consensus layer and execution layer.
var ELMemPoolRequirements []Requirement

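Declaring the variable as a nil slice keeps every call site compiling and behaving sensibly: iterating a nil slice in Go performs zero iterations, so verification against ELMemPoolRequirements simply becomes a no-op. A minimal sketch:

package main

import "fmt"

func main() {
	var requirements []string // nil slice, like the new ELMemPoolRequirements
	checked := 0
	for range requirements {
		checked++ // never runs: a nil slice has length 0
	}
	fmt.Println(checked, requirements == nil) // 0 true
}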
// BackfillBlobSidecarRequirements is the same as InitsyncBlobSidecarRequirements.
var BackfillBlobSidecarRequirements = requirementList(InitsyncBlobSidecarRequirements).excluding()

@@ -42,11 +42,13 @@ var downloadCmd = &cli.Command{
	},
}

const stateSizeLimit int64 = 1 << 29 // 512MB to accommodate future state growth

func cliActionDownload(_ *cli.Context) error {
	ctx := context.Background()
	f := downloadFlags

	opts := []client.ClientOpt{client.WithTimeout(f.Timeout)}
	opts := []client.ClientOpt{client.WithTimeout(f.Timeout), client.WithMaxBodySize(stateSizeLimit)}
	client, err := beacon.NewClient(downloadFlags.BeaconNodeHost, opts...)
	if err != nil {
		return err

@@ -91,7 +91,7 @@ service BeaconChain {
  // that it was included in a block. The attestation may have expired.
  // Refer to the Ethereum Beacon Chain specification for more details on how
  // attestations are processed and when they are no longer valid.
  // https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
  // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
  rpc AttestationPool(AttestationPoolRequest) returns (AttestationPoolResponse) {
    option (google.api.http) = {
      get: "/eth/v1alpha1/beacon/attestations/pool"
@@ -106,7 +106,7 @@ service BeaconChain {
  // that it was included in a block. The attestation may have expired.
  // Refer to the Ethereum Beacon Chain specification for more details on how
  // attestations are processed and when they are no longer valid.
  // https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
  // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
  rpc AttestationPoolElectra(AttestationPoolRequest) returns (AttestationPoolElectraResponse) {
    option (google.api.http) = {
      get: "/eth/v1alpha1/beacon/attestations/pool_electra"

@@ -1,6 +1,8 @@
package version

import "github.com/pkg/errors"
import (
	"github.com/pkg/errors"
)

const (
	Phase0 = iota

@@ -13,6 +13,7 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//math:go_default_library",
        "//runtime/version:go_default_library",
        "//time:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -32,6 +33,7 @@ go_test(
    deps = [
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//time:go_default_library",

@@ -9,6 +9,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	mathutil "github.com/prysmaticlabs/prysm/v5/math"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
	"github.com/sirupsen/logrus"
)
@@ -81,6 +82,25 @@ func ToEpoch(slot primitives.Slot) primitives.Epoch {
	return primitives.Epoch(slot.DivSlot(params.BeaconConfig().SlotsPerEpoch))
}

// ToForkVersion translates a slot into its corresponding fork version.
func ToForkVersion(slot primitives.Slot) int {
	epoch := ToEpoch(slot)
	switch {
	case epoch >= params.BeaconConfig().ElectraForkEpoch:
		return version.Electra
	case epoch >= params.BeaconConfig().DenebForkEpoch:
		return version.Deneb
	case epoch >= params.BeaconConfig().CapellaForkEpoch:
		return version.Capella
	case epoch >= params.BeaconConfig().BellatrixForkEpoch:
		return version.Bellatrix
	case epoch >= params.BeaconConfig().AltairForkEpoch:
		return version.Altair
	default:
		return version.Phase0
	}
}
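A usage sketch for the new helper (assuming the mainnet config is active and the time/slots import path; the exact fork epochs come from whatever config the process runs with): the first slot of a fork epoch maps to that fork's version, while the slot just before it still maps to the previous fork.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func main() {
	params.OverrideBeaconConfig(params.MainnetConfig())

	// First slot of the Altair fork epoch.
	first, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
	if err != nil {
		panic(err)
	}
	fmt.Println(slots.ToForkVersion(first) == version.Altair)   // true
	fmt.Println(slots.ToForkVersion(first-1) == version.Phase0) // true
}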

// EpochStart returns the first slot number of the
// current epoch.
//

@@ -7,6 +7,7 @@ import (

	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -632,3 +633,58 @@ func TestSecondsUntilNextEpochStart(t *testing.T) {
	require.Equal(t, true, IsEpochStart(currentSlot))

}

func TestToForkVersion(t *testing.T) {
	t.Run("Electra fork version", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		config := params.BeaconConfig()
		config.ElectraForkEpoch = 100
		params.OverrideBeaconConfig(config)

		slot, err := EpochStart(params.BeaconConfig().ElectraForkEpoch)
		require.NoError(t, err)

		result := ToForkVersion(slot)
		require.Equal(t, version.Electra, result)
	})

	t.Run("Deneb fork version", func(t *testing.T) {
		slot, err := EpochStart(params.BeaconConfig().DenebForkEpoch)
		require.NoError(t, err)

		result := ToForkVersion(slot)
		require.Equal(t, version.Deneb, result)
	})

	t.Run("Capella fork version", func(t *testing.T) {
		slot, err := EpochStart(params.BeaconConfig().CapellaForkEpoch)
		require.NoError(t, err)

		result := ToForkVersion(slot)
		require.Equal(t, version.Capella, result)
	})

	t.Run("Bellatrix fork version", func(t *testing.T) {
		slot, err := EpochStart(params.BeaconConfig().BellatrixForkEpoch)
		require.NoError(t, err)

		result := ToForkVersion(slot)
		require.Equal(t, version.Bellatrix, result)
	})

	t.Run("Altair fork version", func(t *testing.T) {
		slot, err := EpochStart(params.BeaconConfig().AltairForkEpoch)
		require.NoError(t, err)

		result := ToForkVersion(slot)
		require.Equal(t, version.Altair, result)
	})

	t.Run("Phase0 fork version", func(t *testing.T) {
		slot, err := EpochStart(params.BeaconConfig().AltairForkEpoch)
		require.NoError(t, err)

		result := ToForkVersion(slot - 1)
		require.Equal(t, version.Phase0, result)
	})
}