Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: revert-wai ... refactor-r (10 commits)
Commits:
- fd37e6fe13
- 1707cf3ec7
- bdbb850250
- b28b1ed6ce
- 74bb0821a8
- 8025a483e2
- 0475631543
- f27092fa91
- 67cef41cbf
- 258908d50e
.github/workflows/go.yml (vendored)
@@ -54,7 +54,7 @@ jobs:
      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v5
        with:
-         version: v1.55.2
+         version: v1.56.1
          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

  build:
@@ -73,6 +73,7 @@ linters:
  - promlinter
  - protogetter
  - revive
  - spancheck
  - staticcheck
  - stylecheck
  - tagalign
@@ -27,6 +27,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Add logs when a peer is (dis)connected. Add the reason of the disconnection when we initiate it.
- Added a Prometheus error counter metric for HTTP requests to track beacon node requests.

### Changed
@@ -61,6 +64,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
- Improvements to HTTP response handling. [pr](https://github.com/prysmaticlabs/prysm/pull/14673)

### Deprecated
@@ -71,6 +77,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed kzg proof check from blob reconstructor.

### Fixed
@@ -90,6 +97,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Fix panic on attestation interface since we call data before validation
- corrects nil check on some interface attestation types
- temporary solution to handling electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.

### Security
@@ -12,6 +12,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/client:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types:go_default_library",
@@ -14,6 +14,7 @@ import (

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/api"
    "github.com/prysmaticlabs/prysm/v5/api/client"
    "github.com/prysmaticlabs/prysm/v5/api/server/structs"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -176,7 +177,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
        err = non200Err(r)
        return
    }
-   res, err = io.ReadAll(r.Body)
+   res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
    if err != nil {
        err = errors.Wrap(err, "error reading http response body from builder server")
        return
@@ -358,7 +359,7 @@ func (c *Client) Status(ctx context.Context) error {
}

func non200Err(response *http.Response) error {
-   bodyBytes, err := io.ReadAll(response.Body)
+   bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
    var errMessage ErrorMessage
    var body string
    if err != nil {
@@ -10,11 +10,17 @@ import (
    "github.com/pkg/errors"
)

+const (
+   MaxBodySize    int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
+   MaxErrBodySize int64 = 1 << 17 // 128KB
+)
+
// Client is a wrapper object around the HTTP client.
type Client struct {
-   hc      *http.Client
-   baseURL *url.URL
-   token   string
+   hc          *http.Client
+   baseURL     *url.URL
+   token       string
+   maxBodySize int64
}

// NewClient constructs a new client with the provided options (ex WithTimeout).
@@ -26,8 +32,9 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
        return nil, err
    }
    c := &Client{
-       hc:      &http.Client{},
-       baseURL: u,
+       hc:          &http.Client{},
+       baseURL:     u,
+       maxBodySize: MaxBodySize,
    }
    for _, o := range opts {
        o(c)
@@ -72,7 +79,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
    u := c.baseURL.ResolveReference(&url.URL{Path: path})
-   req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+   req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
    if err != nil {
        return nil, err
    }
@@ -89,7 +96,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
    if r.StatusCode != http.StatusOK {
        return nil, Non200Err(r)
    }
-   b, err := io.ReadAll(r.Body)
+   b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
    if err != nil {
        return nil, errors.Wrap(err, "error reading http response body")
    }
@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")

// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
-func Non200Err(response *http.Response) error {
-   bodyBytes, err := io.ReadAll(response.Body)
+func Non200Err(r *http.Response) error {
+   b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
    var body string
    if err != nil {
        body = "(Unable to read response body.)"
    } else {
-       body = "response body:\n" + string(bodyBytes)
+       body = "response body:\n" + string(b)
    }
-   msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
-   switch response.StatusCode {
+   msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
+   switch r.StatusCode {
    case http.StatusNotFound:
        return errors.Wrap(ErrNotFound, msg)
    default:
@@ -46,3 +46,10 @@ func WithAuthenticationToken(token string) ClientOpt {
        c.token = token
    }
}
+
+// WithMaxBodySize overrides the default max body size of 8MB.
+func WithMaxBodySize(size int64) ClientOpt {
+   return func(c *Client) {
+       c.maxBodySize = size
+   }
+}
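The client changes above (the MaxBodySize/MaxErrBodySize constants, the io.LimitReader wrapping, and the new WithMaxBodySize option) combine into a bounded-read pattern: responses larger than the cap are simply truncated instead of being buffered without limit. A short usage sketch, assuming the api/client package is used exactly as in the hunks above; the host URL and endpoint path are placeholders, not part of the diff.

package main

import (
    "context"
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
    // Override the 8 MiB default with a 1 MiB cap via the new option.
    c, err := client.NewClient("http://localhost:3500", client.WithMaxBodySize(1<<20))
    if err != nil {
        panic(err)
    }

    // Get now reads at most maxBodySize bytes thanks to io.LimitReader,
    // so a misbehaving server cannot force the caller to buffer an unbounded body.
    body, err := c.Get(context.Background(), "/eth/v1/node/version")
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %d bytes\n", len(body))
}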
@@ -386,8 +386,14 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
        return errors.Wrap(err, "batch signature verification failed")
    }

+   pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
+
    // Process each deposit individually
    for _, pendingDeposit := range pendingDeposits {
+       _, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
+       if !found {
+           pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
+       }
        validSignature := allSignaturesVerified

        // If batch verification failed, check the individual deposit signature
@@ -405,9 +411,16 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState

        // Add validator to the registry if the signature is valid
        if validSignature {
-           err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
-           if err != nil {
-               return errors.Wrap(err, "failed to add validator to registry")
+           if found {
+               index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
+               if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
+                   return errors.Wrap(err, "could not increase balance")
+               }
+           } else {
+               err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
+               if err != nil {
+                   return errors.Wrap(err, "failed to add validator to registry")
+               }
            }
        }
    }
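The net effect of the two hunks above is that a batch containing several deposits for the same public key now creates the validator once and credits every later deposit to its balance, instead of adding a duplicate registry entry. A self-contained sketch of that control flow, using trimmed stand-in types rather than Prysm's BeaconState (deposit, applyDeposits and the field names are illustrative only).

package main

import "fmt"

// deposit is a trimmed stand-in for *eth.PendingDeposit: only the fields the
// dedupe logic cares about.
type deposit struct {
    pubkey [48]byte
    amount uint64
}

// applyDeposits mirrors the control flow added in the hunks above: the first
// deposit for a pubkey "creates the validator", later deposits for the same
// pubkey only top up its balance (analogous to helpers.IncreaseBalance vs
// AddValidatorToRegistry).
func applyDeposits(deps []deposit) map[[48]byte]uint64 {
    balances := make(map[[48]byte]uint64, len(deps))
    seen := make(map[[48]byte]struct{}, len(deps))
    for _, d := range deps {
        if _, found := seen[d.pubkey]; found {
            balances[d.pubkey] += d.amount // existing entry: increase balance
            continue
        }
        seen[d.pubkey] = struct{}{}
        balances[d.pubkey] = d.amount // first occurrence: create the entry
    }
    return balances
}

func main() {
    var pk [48]byte
    pk[0] = 0xAA
    out := applyDeposits([]deposit{{pk, 32}, {pk, 32}})
    fmt.Println(out[pk]) // 64: one entry, two credits
}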
@@ -22,6 +22,40 @@ import (
    "github.com/prysmaticlabs/prysm/v5/testing/util"
)

+func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
+   st := stateWithActiveBalanceETH(t, 1000)
+   deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
+   validators := st.Validators()
+   sk, err := bls.RandKey()
+   require.NoError(t, err)
+   for i := 0; i < len(deps); i += 1 {
+       wc := make([]byte, 32)
+       wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
+       wc[31] = byte(i)
+       validators[i].PublicKey = sk.PublicKey().Marshal()
+       validators[i].WithdrawalCredentials = wc
+       deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
+   }
+   require.NoError(t, st.SetPendingDeposits(deps))
+
+   err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
+   require.NoError(t, err)
+
+   val := st.Validators()
+   seenPubkeys := make(map[string]struct{})
+   for i := 0; i < len(val); i += 1 {
+       if len(val[i].PublicKey) == 0 {
+           continue
+       }
+       _, ok := seenPubkeys[string(val[i].PublicKey)]
+       if ok {
+           t.Fatalf("duplicated pubkeys")
+       } else {
+           seenPubkeys[string(val[i].PublicKey)] = struct{}{}
+       }
+   }
+}

func TestProcessPendingDeposits(t *testing.T) {
    tests := []struct {
        name string
@@ -285,7 +319,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
    wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
    wc[31] = byte(0)
    validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
-   invalidDep := &eth.PendingDeposit{}
+   invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
    // have a combination of valid and invalid deposits
    deps := []*eth.PendingDeposit{validDep, invalidDep}
    require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))
@@ -23,10 +23,10 @@ import (
    bolt "go.etcd.io/bbolt"
)

-// used to represent errors for inconsistent slot ranges.
+// Used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")

-// Block retrieval by root.
+// Block retrieval by root. Return nil if block is not found.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
    defer span.End()
@@ -623,13 +623,7 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
            continue
        }

-       // Verify the sidecar KZG proof
        v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
-       if err := v.SidecarKzgProofVerified(); err != nil {
-           log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
-           continue
-       }

        verifiedBlob, err := v.VerifiedROBlob()
        if err != nil {
            log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
@@ -53,7 +53,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
    // Only reorg blocks that arrive late
    early, err := head.arrivedEarly(f.store.genesisTime)
    if err != nil {
-       log.WithError(err).Error("could not check if block arrived early")
+       log.WithError(err).Error("Could not check if block arrived early")
        return
    }
    if early {
@@ -7,6 +7,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/operations/attestations:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
    ],
@@ -3,13 +3,17 @@ package mock
import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

var _ attestations.Pool = &PoolMock{}

// PoolMock --
type PoolMock struct {
-   AggregatedAtts []*ethpb.Attestation
+   AggregatedAtts   []ethpb.Att
+   UnaggregatedAtts []ethpb.Att
}

// AggregateUnaggregatedAttestations --
@@ -23,18 +27,18 @@ func (*PoolMock) AggregateUnaggregatedAttestationsBySlotIndex(_ context.Context,
}

// SaveAggregatedAttestation --
-func (*PoolMock) SaveAggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveAggregatedAttestation(_ ethpb.Att) error {
    panic("implement me")
}

// SaveAggregatedAttestations --
-func (m *PoolMock) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
+func (m *PoolMock) SaveAggregatedAttestations(atts []ethpb.Att) error {
    m.AggregatedAtts = append(m.AggregatedAtts, atts...)
    return nil
}

// AggregatedAttestations --
-func (m *PoolMock) AggregatedAttestations() []*ethpb.Attestation {
+func (m *PoolMock) AggregatedAttestations() []ethpb.Att {
    return m.AggregatedAtts
}
@@ -43,13 +47,18 @@ func (*PoolMock) AggregatedAttestationsBySlotIndex(_ context.Context, _ primitiv
    panic("implement me")
}

+// AggregatedAttestationsBySlotIndexElectra --
+func (*PoolMock) AggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
+   panic("implement me")
+}
+
// DeleteAggregatedAttestation --
-func (*PoolMock) DeleteAggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteAggregatedAttestation(_ ethpb.Att) error {
    panic("implement me")
}

// HasAggregatedAttestation --
-func (*PoolMock) HasAggregatedAttestation(_ *ethpb.Attestation) (bool, error) {
+func (*PoolMock) HasAggregatedAttestation(_ ethpb.Att) (bool, error) {
    panic("implement me")
}
@@ -59,18 +68,19 @@ func (*PoolMock) AggregatedAttestationCount() int {
}

// SaveUnaggregatedAttestation --
-func (*PoolMock) SaveUnaggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
    panic("implement me")
}

// SaveUnaggregatedAttestations --
-func (*PoolMock) SaveUnaggregatedAttestations(_ []*ethpb.Attestation) error {
-   panic("implement me")
+func (m *PoolMock) SaveUnaggregatedAttestations(atts []ethpb.Att) error {
+   m.UnaggregatedAtts = append(m.UnaggregatedAtts, atts...)
+   return nil
}

// UnaggregatedAttestations --
-func (*PoolMock) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
-   panic("implement me")
+func (m *PoolMock) UnaggregatedAttestations() ([]ethpb.Att, error) {
+   return m.UnaggregatedAtts, nil
}

// UnaggregatedAttestationsBySlotIndex --
@@ -78,8 +88,13 @@ func (*PoolMock) UnaggregatedAttestationsBySlotIndex(_ context.Context, _ primit
    panic("implement me")
}

+// UnaggregatedAttestationsBySlotIndexElectra --
+func (*PoolMock) UnaggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
+   panic("implement me")
+}
+
// DeleteUnaggregatedAttestation --
-func (*PoolMock) DeleteUnaggregatedAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteUnaggregatedAttestation(_ ethpb.Att) error {
    panic("implement me")
}
@@ -94,42 +109,42 @@ func (*PoolMock) UnaggregatedAttestationCount() int {
}

// SaveBlockAttestation --
-func (*PoolMock) SaveBlockAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveBlockAttestation(_ ethpb.Att) error {
    panic("implement me")
}

// SaveBlockAttestations --
-func (*PoolMock) SaveBlockAttestations(_ []*ethpb.Attestation) error {
+func (*PoolMock) SaveBlockAttestations(_ []ethpb.Att) error {
    panic("implement me")
}

// BlockAttestations --
-func (*PoolMock) BlockAttestations() []*ethpb.Attestation {
+func (*PoolMock) BlockAttestations() []ethpb.Att {
    panic("implement me")
}

// DeleteBlockAttestation --
-func (*PoolMock) DeleteBlockAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteBlockAttestation(_ ethpb.Att) error {
    panic("implement me")
}

// SaveForkchoiceAttestation --
-func (*PoolMock) SaveForkchoiceAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) SaveForkchoiceAttestation(_ ethpb.Att) error {
    panic("implement me")
}

// SaveForkchoiceAttestations --
-func (*PoolMock) SaveForkchoiceAttestations(_ []*ethpb.Attestation) error {
+func (*PoolMock) SaveForkchoiceAttestations(_ []ethpb.Att) error {
    panic("implement me")
}

// ForkchoiceAttestations --
-func (*PoolMock) ForkchoiceAttestations() []*ethpb.Attestation {
+func (*PoolMock) ForkchoiceAttestations() []ethpb.Att {
    panic("implement me")
}

// DeleteForkchoiceAttestation --
-func (*PoolMock) DeleteForkchoiceAttestation(_ *ethpb.Attestation) error {
+func (*PoolMock) DeleteForkchoiceAttestation(_ ethpb.Att) error {
    panic("implement me")
}
@@ -75,6 +75,8 @@ go_library(
        "//runtime/version:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
    // Disallow bad peers from dialing in.
-   if s.peers.IsBad(pid) {
+   if s.peers.IsBad(pid) != nil {
        return false
    }
    return filterConnections(s.addrFilter, m)
@@ -50,7 +50,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
    }()

    for i := 0; i < highWatermarkBuffer; i++ {
-       addPeer(t, s.peers, peers.PeerConnected, false)
+       addPeer(t, s.peers, peers.Connected, false)
    }

    // create alternate host
@@ -159,7 +159,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
    inboundLimit += 1
    // Add in up to inbound peer limit.
    for i := 0; i < int(inboundLimit); i++ {
-       addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+       addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
    }
    valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
    if valid {
@@ -189,7 +189,7 @@ func (s *Service) RefreshENR() {
        s.updateSubnetRecordWithMetadataV2(bitV, bitS)
    }
    // ping all peers to inform them of new metadata
-   s.pingPeers()
+   s.pingPeersAndLogEnr()
}

// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
@@ -452,7 +452,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
    }

    // Ignore bad nodes.
-   if s.peers.IsBad(peerData.ID) {
+   if s.peers.IsBad(peerData.ID) != nil {
        return false
    }
@@ -378,14 +378,14 @@ func TestInboundPeerLimit(t *testing.T) {
    }

    for i := 0; i < 30; i++ {
-       _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+       _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
    }

    require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
    require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")

    for i := 0; i < highWatermarkBuffer; i++ {
-       _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
+       _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
    }

    require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
@@ -404,13 +404,13 @@ func TestOutboundPeerThreshold(t *testing.T) {
    }

    for i := 0; i < 2; i++ {
-       _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
+       _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
    }

    require.Equal(t, true, s.isBelowOutboundPeerThreshold(), "not at outbound peer threshold")

    for i := 0; i < 3; i++ {
-       _ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
+       _ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
    }

    require.Equal(t, false, s.isBelowOutboundPeerThreshold(), "still at outbound peer threshold")
@@ -477,7 +477,7 @@ func TestCorrectUDPVersion(t *testing.T) {
}

// addPeer is a helper to add a peer with a given connection state)
-func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState, outbound bool) peer.ID {
+func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outbound bool) peer.ID {
    // Set up some peers with different states
    mhBytes := []byte{0x11, 0x04}
    idBytes := make([]byte, 4)
@@ -2,7 +2,6 @@ package p2p

import (
    "context"
-   "errors"
    "fmt"
    "io"
    "sync"
@@ -10,6 +9,7 @@ import (

    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -25,6 +25,46 @@ func peerMultiaddrString(conn network.Conn) string {
    return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
}

+func (s *Service) connectToPeer(conn network.Conn) {
+   s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
+   // Go through the handshake process.
+   log.WithFields(logrus.Fields{
+       "direction":   conn.Stat().Direction.String(),
+       "multiAddr":   peerMultiaddrString(conn),
+       "activePeers": len(s.peers.Active()),
+   }).Debug("Initiate peer connection")
+}
+
+func (s *Service) disconnectFromPeerOnError(
+   conn network.Conn,
+   goodByeFunc func(ctx context.Context, id peer.ID) error,
+   badPeerErr error,
+) {
+   // Get the remote peer ID.
+   remotePeerID := conn.RemotePeer()
+
+   // Set the peer to disconnecting state.
+   s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
+
+   // Only attempt a goodbye if we are still connected to the peer.
+   if s.host.Network().Connectedness(remotePeerID) == network.Connected {
+       if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
+           log.WithError(err).Error("Unable to disconnect from peer")
+       }
+   }
+
+   log.
+       WithError(badPeerErr).
+       WithFields(logrus.Fields{
+           "multiaddr":            peerMultiaddrString(conn),
+           "direction":            conn.Stat().Direction.String(),
+           "remainingActivePeers": len(s.peers.Active()),
+       }).
+       Debug("Initiate peer disconnection")
+
+   s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
+}

// AddConnectionHandler adds a callback function which handles the connection with a
// newly added peer. It performs a handshake with that peer by sending a hello request
// and validating the response from the peer.
@@ -57,18 +97,9 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
    }

    s.host.Network().Notify(&network.NotifyBundle{
-       ConnectedF: func(net network.Network, conn network.Conn) {
+       ConnectedF: func(_ network.Network, conn network.Conn) {
            remotePeer := conn.RemotePeer()
-           disconnectFromPeer := func() {
-               s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
-               // Only attempt a goodbye if we are still connected to the peer.
-               if s.host.Network().Connectedness(remotePeer) == network.Connected {
-                   if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
-                       log.WithError(err).Error("Unable to disconnect from peer")
-                   }
-               }
-               s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
-           }

            // Connection handler must be non-blocking as part of libp2p design.
            go func() {
                if peerHandshaking(remotePeer) {
@@ -77,28 +108,21 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
                    return
                }
                defer peerFinished(remotePeer)

                // Handle the various pre-existing conditions that will result in us not handshaking.
                peerConnectionState, err := s.peers.ConnectionState(remotePeer)
-               if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
+               if err == nil && (peerConnectionState == peers.Connected || peerConnectionState == peers.Connecting) {
                    log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
                    return
                }

                s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)

                // Defensive check in the event we still get a bad peer.
-               if s.peers.IsBad(remotePeer) {
-                   log.WithField("reason", "bad peer").Trace("Ignoring connection request")
-                   disconnectFromPeer()
+               if err := s.peers.IsBad(remotePeer); err != nil {
+                   s.disconnectFromPeerOnError(conn, goodByeFunc, err)
                    return
                }
-               validPeerConnection := func() {
-                   s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
-                   // Go through the handshake process.
-                   log.WithFields(logrus.Fields{
-                       "direction":   conn.Stat().Direction,
-                       "multiAddr":   peerMultiaddrString(conn),
-                       "activePeers": len(s.peers.Active()),
-                   }).Debug("Peer connected")
-               }

                // Do not perform handshake on inbound dials.
                if conn.Stat().Direction == network.DirInbound {
@@ -117,63 +141,80 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
                    // If peer hasn't sent a status request, we disconnect with them
                    if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
                        statusMessageMissing.Inc()
-                       disconnectFromPeer()
+                       s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
                        return
                    }

                    if peerExists {
                        updated, err := s.peers.ChainStateLastUpdated(remotePeer)
                        if err != nil {
-                           disconnectFromPeer()
+                           s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
                            return
                        }
-                       // exit if we don't receive any current status messages from
-                       // peer.
-                       if updated.IsZero() || !updated.After(currentTime) {
-                           disconnectFromPeer()
+
+                       // Exit if we don't receive any current status messages from peer.
+                       if updated.IsZero() {
+                           s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
                            return
                        }
+
+                       if updated.Before(currentTime) {
+                           s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
+                           return
+                       }
                    }
-                   validPeerConnection()
+
+                   s.connectToPeer(conn)
                    return
                }

-               s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
+               s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
                if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
                    log.WithError(err).Trace("Handshake failed")
-                   disconnectFromPeer()
+                   s.disconnectFromPeerOnError(conn, goodByeFunc, err)
                    return
                }
-               validPeerConnection()
+
+               s.connectToPeer(conn)
            }()
        },
    })
}

// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
    s.host.Network().Notify(&network.NotifyBundle{
        DisconnectedF: func(net network.Network, conn network.Conn) {
-           log := log.WithField("multiAddr", peerMultiaddrString(conn))
+           peerID := conn.RemotePeer()
+
+           log.WithFields(logrus.Fields{
+               "multiAddr": peerMultiaddrString(conn),
+               "direction": conn.Stat().Direction.String(),
+           })
            // Must be handled in a goroutine as this callback cannot be blocking.
            go func() {
                // Exit early if we are still connected to the peer.
-               if net.Connectedness(conn.RemotePeer()) == network.Connected {
+               if net.Connectedness(peerID) == network.Connected {
                    return
                }
-               priorState, err := s.peers.ConnectionState(conn.RemotePeer())
+
+               priorState, err := s.peers.ConnectionState(peerID)
                if err != nil {
                    // Can happen if the peer has already disconnected, so...
-                   priorState = peers.PeerDisconnected
+                   priorState = peers.Disconnected
                }
-               s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
+
+               s.peers.SetConnectionState(peerID, peers.Disconnecting)
                if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
                    log.WithError(err).Error("Disconnect handler failed")
                }
-               s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
+
+               s.peers.SetConnectionState(peerID, peers.Disconnected)
+
                // Only log disconnections if we were fully connected.
-               if priorState == peers.PeerConnected {
-                   log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
+               if priorState == peers.Connected {
+                   activePeersCount := len(s.peers.Active())
+                   log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
                }
            }()
        },
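Both handlers are registered through libp2p's network.NotifyBundle, whose callbacks must not block (the diff's own comments call this out), so the real work runs in a goroutine and every failure path now funnels through disconnectFromPeerOnError so the reason is logged. A minimal, standalone sketch of the NotifyBundle pattern itself, outside Prysm; the handler bodies are placeholders, not the project's actual logic.

package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p/core/network"
)

func main() {
    h, err := libp2p.New()
    if err != nil {
        panic(err)
    }
    defer h.Close()

    h.Network().Notify(&network.NotifyBundle{
        ConnectedF: func(_ network.Network, conn network.Conn) {
            // Callbacks must not block; real work happens in a goroutine,
            // mirroring the handlers in the hunks above.
            go fmt.Println("connected:", conn.RemotePeer())
        },
        DisconnectedF: func(_ network.Network, conn network.Conn) {
            go fmt.Println("disconnected:", conn.RemotePeer())
        },
    })

    select {} // keep the host alive so the callbacks can fire
}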
@@ -23,8 +23,8 @@ var (
    ErrNoPeerStatus = errors.New("no chain status for peer")
)

-// PeerConnectionState is the state of the connection.
-type PeerConnectionState ethpb.ConnectionState
+// ConnectionState is the state of the connection.
+type ConnectionState ethpb.ConnectionState

// StoreConfig holds peer store parameters.
type StoreConfig struct {
@@ -49,7 +49,7 @@ type PeerData struct {
    // Network related data.
    Address   ma.Multiaddr
    Direction network.Direction
-   ConnState PeerConnectionState
+   ConnState ConnectionState
    Enr       *enr.Record
    NextValidTime time.Time
    // Chain related data.
@@ -20,6 +20,7 @@ go_library(
        "//crypto/rand:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
+       "@com_github_pkg_errors//:go_default_library",
    ],
)
@@ -4,6 +4,7 @@ import (
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
)
@@ -61,7 +62,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
-   if s.isBadPeerNoLock(pid) {
+   if s.isBadPeerNoLock(pid) != nil {
        return BadPeerScore
    }
    score := float64(0)
@@ -116,18 +117,24 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {

// IsBadPeer states if the peer is to be considered bad.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
+func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
    s.store.RLock()
    defer s.store.RUnlock()

    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
    if peerData, ok := s.store.PeerData(pid); ok {
-       return peerData.BadResponses >= s.config.Threshold
+       if peerData.BadResponses >= s.config.Threshold {
+           return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
+       }
+
+       return nil
    }
-   return false
+
+   return nil
}

// BadPeers returns the peers that are considered bad.
@@ -137,7 +144,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) {
+       if s.isBadPeerNoLock(pid) != nil {
            badPeers = append(badPeers, pid)
        }
    }
@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
    assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

    scorer.Increment(pid)
-   assert.Equal(t, false, scorer.IsBadPeer(pid))
+   assert.NoError(t, scorer.IsBadPeer(pid))
    assert.Equal(t, -2.5, scorer.Score(pid))

    scorer.Increment(pid)
-   assert.Equal(t, false, scorer.IsBadPeer(pid))
+   assert.NoError(t, scorer.IsBadPeer(pid))
    assert.Equal(t, float64(-5), scorer.Score(pid))

    scorer.Increment(pid)
-   assert.Equal(t, false, scorer.IsBadPeer(pid))
+   assert.NoError(t, scorer.IsBadPeer(pid))
    assert.Equal(t, float64(-7.5), scorer.Score(pid))

    scorer.Increment(pid)
-   assert.Equal(t, true, scorer.IsBadPeer(pid))
+   assert.NotNil(t, scorer.IsBadPeer(pid))
    assert.Equal(t, -100.0, scorer.Score(pid))
}
@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
    })
    scorer := peerStatuses.Scorers().BadResponsesScorer()
    pid := peer.ID("peer1")
-   assert.Equal(t, false, scorer.IsBadPeer(pid))
+   assert.NoError(t, scorer.IsBadPeer(pid))

    peerStatuses.Add(nil, pid, nil, network.DirUnknown)
-   assert.Equal(t, false, scorer.IsBadPeer(pid))
+   assert.NoError(t, scorer.IsBadPeer(pid))

    for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
        scorer.Increment(pid)
        if i == scorers.DefaultBadResponsesThreshold-1 {
-           assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
+           assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
        } else {
-           assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
+           assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
        }
    }
}
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
        scorer.Increment(pids[2])
        scorer.Increment(pids[4])
    }
-   assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
-   assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
-   assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
-   assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
-   assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
+   assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
+   assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
+   assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
+   assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
+   assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
    want := []peer.ID{pids[1], pids[2], pids[4]}
    badPeers := scorer.BadPeers()
    sort.Slice(badPeers, func(i, j int) bool {
@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
-func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
-   return false
+func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
+   return nil
}

// BadPeers returns the peers that are considered bad.
@@ -119,7 +119,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
    }

    for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
+       t.Run(tt.name, func(*testing.T) {
            peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
                PeerLimit: 30,
                ScorerParams: &scorers.Config{
@@ -224,7 +224,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
    }{
        {
            name:   "no peers",
-           update: func(s *scorers.BlockProviderScorer) {},
+           update: func(*scorers.BlockProviderScorer) {},
            have:   []peer.ID{},
            want:   []peer.ID{},
        },
@@ -451,7 +451,7 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
        })
    }
    for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
+       t.Run(tt.name, func(*testing.T) {
            peerStatuses := peerStatusGen()
            scorer := peerStatuses.Scorers().BlockProviderScorer()
            if tt.update != nil {
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
    })
    scorer := peerStatuses.Scorers().BlockProviderScorer()

-   assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
+   assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
    scorer.IncrementProcessedBlocks("peer1", 64)
-   assert.Equal(t, false, scorer.IsBadPeer("peer1"))
+   assert.NoError(t, scorer.IsBadPeer("peer1"))
    assert.Equal(t, 0, len(scorer.BadPeers()))
}
@@ -2,6 +2,7 @@ package scorers

import (
    "github.com/libp2p/go-libp2p/core/peer"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -51,19 +52,24 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
-func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
+func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
    s.store.RLock()
    defer s.store.RUnlock()
    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
    peerData, ok := s.store.PeerData(pid)
    if !ok {
-       return false
+       return nil
    }
-   return peerData.GossipScore < gossipThreshold
+
+   if peerData.GossipScore < gossipThreshold {
+       return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
+   }
+
+   return nil
}

// BadPeers returns the peers that are considered bad.
@@ -73,7 +79,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) {
+       if s.isBadPeerNoLock(pid) != nil {
            badPeers = append(badPeers, pid)
        }
    }
@@ -21,7 +21,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
    }{
        {
            name: "nonexistent peer",
-           update: func(scorer *scorers.GossipScorer) {
+           update: func(*scorers.GossipScorer) {
            },
            check: func(scorer *scorers.GossipScorer) {
                assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
            },
            check: func(scorer *scorers.GossipScorer) {
                assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
-               assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
+               assert.NotNil(t, scorer.IsBadPeer("peer1"), "Unexpected good peer")
            },
        },
        {
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
            },
            check: func(scorer *scorers.GossipScorer) {
                assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
-               assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
+               assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
                _, _, topicMap, err := scorer.GossipData("peer1")
                assert.NoError(t, err)
                assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -53,7 +53,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
    }

    for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
+       t.Run(tt.name, func(*testing.T) {
            peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
                ScorerParams: &scorers.Config{},
            })
@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
-   if s.isBadPeerNoLock(pid) {
+   if s.isBadPeerNoLock(pid) != nil {
        return BadPeerScore
    }
    score := float64(0)
@@ -67,30 +67,34 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
-func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
+func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) error {
    s.store.RLock()
    defer s.store.RUnlock()

    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
-func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
+func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) error {
    peerData, ok := s.store.PeerData(pid)
    if !ok {
-       return false
+       return nil
    }

    // Mark peer as bad, if the latest error is one of the terminal ones.
    terminalErrs := []error{
        p2ptypes.ErrWrongForkDigestVersion,
        p2ptypes.ErrInvalidFinalizedRoot,
        p2ptypes.ErrInvalidRequest,
    }

    for _, err := range terminalErrs {
        if errors.Is(peerData.ChainStateValidationError, err) {
-           return true
+           return err
        }
    }
-   return false
+
+   return nil
}

// BadPeers returns the peers that are considered bad.
@@ -100,7 +104,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) {
+       if s.isBadPeerNoLock(pid) != nil {
            badPeers = append(badPeers, pid)
        }
    }
@@ -122,7 +122,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
    }

    for _, tt := range tests {
-       t.Run(tt.name, func(t *testing.T) {
+       t.Run(tt.name, func(*testing.T) {
            peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
                ScorerParams: &scorers.Config{},
            })
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
        ScorerParams: &scorers.Config{},
    })
    pid := peer.ID("peer1")
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
-   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
+   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))

    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
-   assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
+   assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}

func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
    pid1 := peer.ID("peer1")
    pid2 := peer.ID("peer2")
    pid3 := peer.ID("peer3")
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
-   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
-   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
-   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid1))
+   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
+   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
+   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))

    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
-   assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
-   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
-   assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
+   assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
+   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
+   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid3))
+   assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
    assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
    assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
@@ -6,6 +6,7 @@ import (
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v5/config/features"
)
@@ -24,7 +25,7 @@ const BadPeerScore = gossipThreshold

// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
    Score(pid peer.ID) float64
-   IsBadPeer(pid peer.ID) bool
+   IsBadPeer(pid peer.ID) error
    BadPeers() []peer.ID
}
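With the interface change above, every scorer reports why a peer is bad instead of a bare boolean, and nil still means the peer is fine. A hypothetical scorer satisfying the new shape, purely for illustration (staticScorer and its banned map are not part of the diff):

package scorerexample

import (
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
)

// staticScorer is an illustrative implementation of the updated Scorer
// interface: IsBadPeer returns a descriptive error rather than a bool.
type staticScorer struct {
    banned map[peer.ID]string // peer -> reason (assumed data source)
}

func (s *staticScorer) Score(pid peer.ID) float64 {
    if _, ok := s.banned[pid]; ok {
        return -100 // mirrors the BadPeerScore idea used by the real scorers
    }
    return 0
}

func (s *staticScorer) IsBadPeer(pid peer.ID) error {
    if reason, ok := s.banned[pid]; ok {
        return errors.Errorf("peer is bad: %s", reason)
    }
    return nil // nil means acceptable, as in the diff
}

func (s *staticScorer) BadPeers() []peer.ID {
    out := make([]peer.ID, 0, len(s.banned))
    for pid := range s.banned {
        out = append(out, pid)
    }
    return out
}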
@@ -124,26 +125,29 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
-func (s *Service) IsBadPeer(pid peer.ID) bool {
+func (s *Service) IsBadPeer(pid peer.ID) error {
    s.store.RLock()
    defer s.store.RUnlock()
    return s.IsBadPeerNoLock(pid)
}

// IsBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
-   if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
-       return true
+func (s *Service) IsBadPeerNoLock(pid peer.ID) error {
+   if err := s.scorers.badResponsesScorer.isBadPeerNoLock(pid); err != nil {
+       return errors.Wrap(err, "bad responses scorer")
    }
-   if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
-       return true
+
+   if err := s.scorers.peerStatusScorer.isBadPeerNoLock(pid); err != nil {
+       return errors.Wrap(err, "peer status scorer")
    }

    if features.Get().EnablePeerScorer {
-       if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
-           return true
+       if err := s.scorers.gossipScorer.isBadPeerNoLock(pid); err != nil {
+           return errors.Wrap(err, "gossip scorer")
        }
    }
-   return false
+
+   return nil
}

// BadPeers returns the peers that are considered bad by any of registered scorers.
@@ -153,7 +157,7 @@ func (s *Service) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.IsBadPeerNoLock(pid) {
+       if s.IsBadPeerNoLock(pid) != nil {
            badPeers = append(badPeers, pid)
        }
    }
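Call sites follow the same convention: treat nil as "acceptable" and otherwise surface the reason, which is what the InterceptAddrDial, filterPeer and handshake hunks above do, either with a simple != nil check or by passing the error straight to disconnectFromPeerOnError. A hedged caller sketch; the interface and function names here are illustrative stand-ins, not Prysm APIs.

package p2pexample

import (
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/sirupsen/logrus"
)

// badPeerChecker is a stand-in for the slice of the peers API used here;
// in Prysm the concrete type lives in beacon-chain/p2p/peers.
type badPeerChecker interface {
    IsBad(pid peer.ID) error
}

// shouldDial mirrors the new call sites: nil means the peer is acceptable,
// a non-nil error carries the reason, which can now be logged instead of
// being flattened into a bare boolean.
func shouldDial(peers badPeerChecker, pid peer.ID) bool {
    if err := peers.IsBad(pid); err != nil {
        logrus.WithError(err).WithField("peer", pid.String()).Debug("Rejecting bad peer")
        return false
    }
    return true
}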
@@ -100,7 +100,7 @@ func TestScorers_Service_Score(t *testing.T) {
        return scores
    }

-   pack := func(scorer *scorers.Service, s1, s2, s3 float64) map[string]float64 {
+   pack := func(_ *scorers.Service, s1, s2, s3 float64) map[string]float64 {
        return map[string]float64{
            "peer1": roundScore(s1),
            "peer2": roundScore(s2),
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
    for i := 0; i < s1.Params().Threshold+5; i++ {
        s1.Increment(pid1)
    }
-   assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")
+   assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")

    s2.IncrementProcessedBlocks("peer1", 221)
    assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
        for {
            select {
            case <-ticker.C:
-               if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
+               if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
                    return
                }
            case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
    }()

    <-done
-   assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
+   assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
    assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
        },
    })

-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
    peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
    peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
-   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
}

func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
        },
    })

-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
    assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
    for _, pid := range []peer.ID{"peer1", "peer3"} {
        peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
        peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
    }
-   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
-   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
-   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
+   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
+   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
    assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
@@ -34,6 +34,7 @@ import (
    "github.com/libp2p/go-libp2p/core/peer"
    ma "github.com/multiformats/go-multiaddr"
    manet "github.com/multiformats/go-multiaddr/net"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -49,14 +50,14 @@ import (
|
||||
)
|
||||
|
||||
const (
// PeerDisconnected means there is no connection to the peer.
PeerDisconnected peerdata.PeerConnectionState = iota
// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
PeerDisconnecting
// PeerConnected means the peer has an active connection.
PeerConnected
// PeerConnecting means there is an on-going attempt to connect to the peer.
PeerConnecting
// Disconnected means there is no connection to the peer.
Disconnected peerdata.ConnectionState = iota
// Disconnecting means there is an on-going attempt to disconnect from the peer.
Disconnecting
// Connected means the peer has an active connection.
Connected
// Connecting means there is an on-going attempt to connect to the peer.
Connecting
)
|
||||
|
||||
const (
|
||||
@@ -150,7 +151,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
|
||||
Address: address,
|
||||
Direction: direction,
|
||||
// Peers start disconnected; state will be updated when the handshake process begins.
|
||||
ConnState: PeerDisconnected,
|
||||
ConnState: Disconnected,
|
||||
}
|
||||
if record != nil {
|
||||
peerData.Enr = record
|
||||
@@ -212,7 +213,7 @@ func (p *Status) IsActive(pid peer.ID) bool {
|
||||
defer p.store.RUnlock()
|
||||
|
||||
peerData, ok := p.store.PeerData(pid)
|
||||
return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
|
||||
return ok && (peerData.ConnState == Connected || peerData.ConnState == Connecting)
|
||||
}
|
||||
|
||||
// IsAboveInboundLimit checks if we are above our current inbound
|
||||
@@ -222,7 +223,7 @@ func (p *Status) IsAboveInboundLimit() bool {
|
||||
defer p.store.RUnlock()
|
||||
totalInbound := 0
|
||||
for _, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected &&
|
||||
if peerData.ConnState == Connected &&
|
||||
peerData.Direction == network.DirInbound {
|
||||
totalInbound += 1
|
||||
}
|
||||
@@ -286,7 +287,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
// look at active peers
|
||||
connectedStatus := peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected
|
||||
connectedStatus := peerData.ConnState == Connecting || peerData.ConnState == Connected
|
||||
if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
|
||||
indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
|
||||
for _, idx := range indices {
|
||||
@@ -301,7 +302,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
|
||||
}
|
||||
|
||||
// SetConnectionState sets the connection state of the given remote peer.
|
||||
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
|
||||
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.ConnectionState) {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
@@ -311,14 +312,14 @@ func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionSt
|
||||
|
||||
// ConnectionState gets the connection state of the given remote peer.
|
||||
// This will error if the peer does not exist.
|
||||
func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
|
||||
func (p *Status) ConnectionState(pid peer.ID) (peerdata.ConnectionState, error) {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
|
||||
if peerData, ok := p.store.PeerData(pid); ok {
|
||||
return peerData.ConnState, nil
|
||||
}
|
||||
return PeerDisconnected, peerdata.ErrPeerUnknown
|
||||
return Disconnected, peerdata.ErrPeerUnknown
|
||||
}
|
||||
|
||||
// ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
|
||||
@@ -335,19 +336,29 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
|
||||
|
||||
// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
func (p *Status) IsBad(pid peer.ID) error {
p.store.RLock()
defer p.store.RUnlock()

return p.isBad(pid)
}

// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) bool {
func (p *Status) isBad(pid peer.ID) error {
// Do not disconnect from trusted peers.
if p.store.IsTrustedPeer(pid) {
return false
return nil
}
return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)

if err := p.isfromBadIP(pid); err != nil {
return errors.Wrap(err, "peer is from a bad IP")
}

if err := p.scorers.IsBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "is bad peer no lock")
}

return nil
}
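A minimal, hedged usage sketch (not taken from this diff): with IsBad now returning an error instead of a bool, a caller can log the concrete reason a peer was rejected. The `status`, `pid` and `log` identifiers below are illustrative only.

// Illustrative only: `status` stands for any *peers.Status instance; the wrapped error
// explains whether the peer came from a bad IP or was flagged by one of the scorers.
if err := status.IsBad(pid); err != nil {
	log.WithError(err).WithField("peer", pid).Debug("Refusing to dial bad peer")
	return
}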
|
||||
|
||||
// NextValidTime gets the earliest possible time it is to contact/dial
|
||||
@@ -411,7 +422,7 @@ func (p *Status) Connecting() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnecting {
|
||||
if peerData.ConnState == Connecting {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -424,7 +435,7 @@ func (p *Status) Connected() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected {
|
||||
if peerData.ConnState == Connected {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -450,7 +461,7 @@ func (p *Status) InboundConnected() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
|
||||
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -463,7 +474,7 @@ func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
|
||||
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -489,7 +500,7 @@ func (p *Status) OutboundConnected() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
|
||||
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -502,7 +513,7 @@ func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
|
||||
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -515,7 +526,7 @@ func (p *Status) Active() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
|
||||
if peerData.ConnState == Connecting || peerData.ConnState == Connected {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -528,7 +539,7 @@ func (p *Status) Disconnecting() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerDisconnecting {
|
||||
if peerData.ConnState == Disconnecting {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -541,7 +552,7 @@ func (p *Status) Disconnected() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerDisconnected {
|
||||
if peerData.ConnState == Disconnected {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -554,7 +565,7 @@ func (p *Status) Inactive() []peer.ID {
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
|
||||
if peerData.ConnState == Disconnecting || peerData.ConnState == Disconnected {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
@@ -592,7 +603,7 @@ func (p *Status) Prune() {
|
||||
return
|
||||
}
|
||||
notBadPeer := func(pid peer.ID) bool {
|
||||
return !p.isBad(pid)
|
||||
return p.isBad(pid) == nil
|
||||
}
|
||||
notTrustedPeer := func(pid peer.ID) bool {
|
||||
return !p.isTrustedPeers(pid)
|
||||
@@ -605,7 +616,7 @@ func (p *Status) Prune() {
|
||||
// Select disconnected peers with a smaller bad response count.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
// Should not prune trusted peer or prune the peer data and unset trusted peer.
|
||||
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
|
||||
if peerData.ConnState == Disconnected && notBadPeer(pid) && notTrustedPeer(pid) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
score: p.Scorers().ScoreNoLock(pid),
|
||||
@@ -657,7 +668,7 @@ func (p *Status) deprecatedPrune() {
|
||||
// Select disconnected peers with a smaller bad response count.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
// Should not prune trusted peer or prune the peer data and unset trusted peer.
|
||||
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
|
||||
if peerData.ConnState == Disconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
badResp: peerData.BadResponses,
|
||||
@@ -814,7 +825,7 @@ func (p *Status) PeersToPrune() []peer.ID {
|
||||
peersToPrune := make([]*peerResp, 0)
|
||||
// Select connected and inbound peers to prune.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected &&
|
||||
if peerData.ConnState == Connected &&
|
||||
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
@@ -880,7 +891,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
|
||||
peersToPrune := make([]*peerResp, 0)
|
||||
// Select connected and inbound peers to prune.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected &&
|
||||
if peerData.ConnState == Connected &&
|
||||
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
@@ -982,24 +993,28 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {
|
||||
|
||||
// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {
func (p *Status) isfromBadIP(pid peer.ID) error {
peerData, ok := p.store.PeerData(pid)
if !ok {
return false
return nil
}

if peerData.Address == nil {
return false
return nil
}

ip, err := manet.ToIP(peerData.Address)
if err != nil {
return true
return errors.Wrap(err, "to ip")
}

if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return true
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
}
}
return false

return nil
}
|
||||
|
||||
func (p *Status) addIpToTracker(pid peer.ID) {
|
||||
|
||||
@@ -215,7 +215,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
|
||||
// Add some peers with different states
|
||||
numPeers := 2
|
||||
for i := 0; i < numPeers; i++ {
|
||||
addPeer(t, p, peers.PeerConnected)
|
||||
addPeer(t, p, peers.Connected)
|
||||
}
|
||||
expectedPeer := p.All()[1]
|
||||
bitV := bitfield.NewBitvector64()
|
||||
@@ -230,7 +230,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
|
||||
}))
|
||||
numPeers = 3
|
||||
for i := 0; i < numPeers; i++ {
|
||||
addPeer(t, p, peers.PeerDisconnected)
|
||||
addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
ps := p.SubscribedToSubnet(2)
|
||||
assert.Equal(t, 1, len(ps), "Unexpected num of peers")
|
||||
@@ -259,7 +259,7 @@ func TestPeerImplicitAdd(t *testing.T) {
|
||||
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
require.NoError(t, err)
|
||||
|
||||
connectionState := peers.PeerConnecting
|
||||
connectionState := peers.Connecting
|
||||
p.SetConnectionState(id, connectionState)
|
||||
|
||||
resConnectionState, err := p.ConnectionState(id)
|
||||
@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
require.NoError(t, err, "Failed to create address")
|
||||
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
|
||||
resBadResponses, err := scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
}
|
||||
|
||||
func TestAddMetaData(t *testing.T) {
|
||||
@@ -393,7 +393,7 @@ func TestAddMetaData(t *testing.T) {
|
||||
// Add some peers with different states
|
||||
numPeers := 5
|
||||
for i := 0; i < numPeers; i++ {
|
||||
addPeer(t, p, peers.PeerConnected)
|
||||
addPeer(t, p, peers.Connected)
|
||||
}
|
||||
newPeer := p.All()[2]
|
||||
|
||||
@@ -422,19 +422,19 @@ func TestPeerConnectionStatuses(t *testing.T) {
|
||||
// Add some peers with different states
|
||||
numPeersDisconnected := 11
|
||||
for i := 0; i < numPeersDisconnected; i++ {
|
||||
addPeer(t, p, peers.PeerDisconnected)
|
||||
addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
numPeersConnecting := 7
|
||||
for i := 0; i < numPeersConnecting; i++ {
|
||||
addPeer(t, p, peers.PeerConnecting)
|
||||
addPeer(t, p, peers.Connecting)
|
||||
}
|
||||
numPeersConnected := 43
|
||||
for i := 0; i < numPeersConnected; i++ {
|
||||
addPeer(t, p, peers.PeerConnected)
|
||||
addPeer(t, p, peers.Connected)
|
||||
}
|
||||
numPeersDisconnecting := 4
|
||||
for i := 0; i < numPeersDisconnecting; i++ {
|
||||
addPeer(t, p, peers.PeerDisconnecting)
|
||||
addPeer(t, p, peers.Disconnecting)
|
||||
}
|
||||
|
||||
// Now confirm the states
|
||||
@@ -463,7 +463,7 @@ func TestPeerValidTime(t *testing.T) {
|
||||
|
||||
numPeersConnected := 6
|
||||
for i := 0; i < numPeersConnected; i++ {
|
||||
addPeer(t, p, peers.PeerConnected)
|
||||
addPeer(t, p, peers.Connected)
|
||||
}
|
||||
|
||||
allPeers := p.All()
|
||||
@@ -510,10 +510,10 @@ func TestPrune(t *testing.T) {
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
if i%7 == 0 {
|
||||
// Peer added as disconnected.
|
||||
_ = addPeer(t, p, peers.PeerDisconnected)
|
||||
_ = addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
// Peer added to peer handler.
|
||||
_ = addPeer(t, p, peers.PeerConnected)
|
||||
_ = addPeer(t, p, peers.Connected)
|
||||
}
|
||||
|
||||
disPeers := p.Disconnected()
|
||||
@@ -571,23 +571,23 @@ func TestPeerIPTracker(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
}
|
||||
for _, pr := range badPeers {
|
||||
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
}
|
||||
|
||||
// Add in bad peers, so that our records are trimmed out
|
||||
// from the peer store.
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// Peer added to peer handler.
|
||||
pid := addPeer(t, p, peers.PeerDisconnected)
|
||||
pid := addPeer(t, p, peers.Disconnected)
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -601,8 +601,11 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
expectedTarget := primitives.Epoch(2)
|
||||
maxPeers := 3
|
||||
const (
|
||||
expectedTarget = primitives.Epoch(2)
|
||||
maxPeers = 3
|
||||
)
|
||||
|
||||
var mockroot2 [32]byte
|
||||
var mockroot3 [32]byte
|
||||
var mockroot4 [32]byte
|
||||
@@ -611,36 +614,41 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
copy(mockroot3[:], "three")
|
||||
copy(mockroot4[:], "four")
|
||||
copy(mockroot5[:], "five")
|
||||
|
||||
// Peer 1
|
||||
pid1 := addPeer(t, p, peers.PeerConnected)
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid1, &pb.Status{
|
||||
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 3,
|
||||
FinalizedRoot: mockroot3[:],
|
||||
})
|
||||
|
||||
// Peer 2
|
||||
pid2 := addPeer(t, p, peers.PeerConnected)
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid2, &pb.Status{
|
||||
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 4,
|
||||
FinalizedRoot: mockroot4[:],
|
||||
})
|
||||
|
||||
// Peer 3
|
||||
pid3 := addPeer(t, p, peers.PeerConnected)
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid3, &pb.Status{
|
||||
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 5,
|
||||
FinalizedRoot: mockroot5[:],
|
||||
})
|
||||
|
||||
// Peer 4
|
||||
pid4 := addPeer(t, p, peers.PeerConnected)
|
||||
pid4 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid4, &pb.Status{
|
||||
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 2,
|
||||
FinalizedRoot: mockroot2[:],
|
||||
})
|
||||
|
||||
// Peer 5
|
||||
pid5 := addPeer(t, p, peers.PeerConnected)
|
||||
pid5 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid5, &pb.Status{
|
||||
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 2,
|
||||
@@ -680,12 +688,12 @@ func TestAtInboundPeerLimit(t *testing.T) {
|
||||
})
|
||||
for i := 0; i < 15; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
assert.Equal(t, false, p.IsAboveInboundLimit(), "Inbound limit exceeded")
|
||||
for i := 0; i < 31; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
assert.Equal(t, true, p.IsAboveInboundLimit(), "Inbound limit not exceeded")
|
||||
}
|
||||
@@ -705,7 +713,7 @@ func TestPrunePeers(t *testing.T) {
|
||||
})
|
||||
for i := 0; i < 15; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
// Assert there are no prunable peers.
|
||||
peersToPrune := p.PeersToPrune()
|
||||
@@ -713,7 +721,7 @@ func TestPrunePeers(t *testing.T) {
|
||||
|
||||
for i := 0; i < 18; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
// Assert there are the correct prunable peers.
|
||||
@@ -723,7 +731,7 @@ func TestPrunePeers(t *testing.T) {
|
||||
// Add in more peers.
|
||||
for i := 0; i < 13; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
// Set up bad scores for inbound peers.
|
||||
@@ -767,7 +775,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
|
||||
|
||||
for i := 0; i < 15; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
// Assert there are no prunable peers.
|
||||
peersToPrune := p.PeersToPrune()
|
||||
@@ -775,7 +783,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
|
||||
|
||||
for i := 0; i < 18; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
// Assert there are the correct prunable peers.
|
||||
@@ -785,7 +793,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
|
||||
// Add in more peers.
|
||||
for i := 0; i < 13; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
var trustedPeers []peer.ID
|
||||
@@ -821,7 +829,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
|
||||
// Add more peers to check if trusted peers can be pruned after they are deleted from trusted peer set.
|
||||
for i := 0; i < 9; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
// Delete trusted peers.
|
||||
@@ -865,14 +873,14 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
headSlot primitives.Slot
|
||||
finalizedEpoch primitives.Epoch
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
peers []*peerConfig
|
||||
limitPeers int
|
||||
ourFinalizedEpoch primitives.Epoch
|
||||
targetEpoch primitives.Epoch
|
||||
// targetEpochSupport denotes how many peers support returned epoch.
|
||||
targetEpochSupport int
|
||||
name string
|
||||
peers []*peerConfig
|
||||
limitPeers int
|
||||
ourFinalizedEpoch primitives.Epoch
|
||||
targetEpoch primitives.Epoch
|
||||
targetEpochSupport int // Denotes how many peers support returned epoch.
|
||||
}{
|
||||
{
|
||||
name: "head slot matches finalized epoch",
|
||||
@@ -885,6 +893,7 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
limitPeers: 15,
|
||||
ourFinalizedEpoch: 0,
|
||||
targetEpoch: 4,
|
||||
targetEpochSupport: 4,
|
||||
},
|
||||
@@ -902,6 +911,7 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
limitPeers: 15,
|
||||
ourFinalizedEpoch: 0,
|
||||
targetEpoch: 4,
|
||||
targetEpochSupport: 4,
|
||||
},
|
||||
@@ -916,6 +926,7 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
limitPeers: 15,
|
||||
ourFinalizedEpoch: 0,
|
||||
targetEpoch: 4,
|
||||
targetEpochSupport: 4,
|
||||
},
|
||||
@@ -930,8 +941,8 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
|
||||
{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
ourFinalizedEpoch: 5,
|
||||
limitPeers: 15,
|
||||
ourFinalizedEpoch: 5,
|
||||
targetEpoch: 6,
|
||||
targetEpochSupport: 1,
|
||||
},
|
||||
@@ -950,8 +961,8 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
|
||||
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
ourFinalizedEpoch: 5,
|
||||
limitPeers: 15,
|
||||
ourFinalizedEpoch: 5,
|
||||
targetEpoch: 6,
|
||||
targetEpochSupport: 5,
|
||||
},
|
||||
@@ -970,8 +981,8 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
|
||||
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
ourFinalizedEpoch: 5,
|
||||
limitPeers: 4,
|
||||
ourFinalizedEpoch: 5,
|
||||
targetEpoch: 6,
|
||||
targetEpochSupport: 4,
|
||||
},
|
||||
@@ -986,8 +997,8 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
|
||||
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
|
||||
},
|
||||
ourFinalizedEpoch: 5,
|
||||
limitPeers: 15,
|
||||
ourFinalizedEpoch: 5,
|
||||
targetEpoch: 8,
|
||||
targetEpochSupport: 3,
|
||||
},
|
||||
@@ -1002,7 +1013,7 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
},
|
||||
})
|
||||
for _, peerConfig := range tt.peers {
|
||||
p.SetChainState(addPeer(t, p, peers.PeerConnected), &pb.Status{
|
||||
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
|
||||
FinalizedEpoch: peerConfig.finalizedEpoch,
|
||||
HeadSlot: peerConfig.headSlot,
|
||||
})
|
||||
@@ -1028,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
|
||||
|
||||
for i := 0; i <= maxPeers+100; i++ {
|
||||
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.Status{
|
||||
FinalizedEpoch: 10,
|
||||
})
|
||||
@@ -1051,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
|
||||
peerSlots := []primitives.Slot{32, 32, 32, 32, 235, 233, 258, 268, 270}
|
||||
for i, headSlot := range peerSlots {
|
||||
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.Status{
|
||||
HeadSlot: headSlot,
|
||||
})
|
||||
@@ -1074,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
|
||||
},
|
||||
})
|
||||
// Peer 1
|
||||
pid1 := addPeer(t, p, peers.PeerConnected)
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid1, &pb.Status{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
|
||||
})
|
||||
// Peer 2
|
||||
pid2 := addPeer(t, p, peers.PeerConnected)
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid2, &pb.Status{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
// Peer 3
|
||||
pid3 := addPeer(t, p, peers.PeerConnected)
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid3, &pb.Status{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
|
||||
})
|
||||
@@ -1103,8 +1114,8 @@ func TestInbound(t *testing.T) {
|
||||
})
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
|
||||
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
|
||||
createPeer(t, p, addr, network.DirOutbound, peers.Connected)
|
||||
|
||||
result := p.Inbound()
|
||||
require.Equal(t, 1, len(result))
|
||||
@@ -1123,8 +1134,8 @@ func TestInboundConnected(t *testing.T) {
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
|
||||
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
|
||||
createPeer(t, p, addr, network.DirInbound, peers.Connecting)
|
||||
|
||||
result := p.InboundConnected()
|
||||
require.Equal(t, 1, len(result))
|
||||
@@ -1157,7 +1168,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
|
||||
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
|
||||
expectedTCP[peer.String()] = true
|
||||
}
|
||||
|
||||
@@ -1166,7 +1177,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
|
||||
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
|
||||
expectedQUIC[peer.String()] = true
|
||||
}
|
||||
|
||||
@@ -1203,8 +1214,8 @@ func TestOutbound(t *testing.T) {
|
||||
})
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
|
||||
outbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirInbound, peers.Connected)
|
||||
outbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
|
||||
|
||||
result := p.Outbound()
|
||||
require.Equal(t, 1, len(result))
|
||||
@@ -1223,8 +1234,8 @@ func TestOutboundConnected(t *testing.T) {
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
|
||||
inbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
|
||||
createPeer(t, p, addr, network.DirOutbound, peers.Connecting)
|
||||
|
||||
result := p.OutboundConnected()
|
||||
require.Equal(t, 1, len(result))
|
||||
@@ -1257,7 +1268,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
|
||||
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
|
||||
expectedTCP[peer.String()] = true
|
||||
}
|
||||
|
||||
@@ -1266,7 +1277,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
|
||||
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
|
||||
expectedQUIC[peer.String()] = true
|
||||
}
|
||||
|
||||
@@ -1293,7 +1304,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
|
||||
}
|
||||
|
||||
// addPeer is a helper to add a peer with a given connection state.
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState) peer.ID {
|
||||
// Set up some peers with different states
|
||||
mhBytes := []byte{0x11, 0x04}
|
||||
idBytes := make([]byte, 4)
|
||||
@@ -1312,7 +1323,7 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
|
||||
}
|
||||
|
||||
func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
|
||||
dir network.Direction, state peerdata.PeerConnectionState) peer.ID {
|
||||
dir network.Direction, state peerdata.ConnectionState) peer.ID {
|
||||
mhBytes := []byte{0x11, 0x04}
|
||||
idBytes := make([]byte, 4)
|
||||
_, err := rand.Read(idBytes)
|
||||
|
||||
@@ -165,14 +165,14 @@ func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
|
||||
addrs, err := PeersFromStringAddrs(peers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %w", err)
|
||||
return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %w", err)
|
||||
}
|
||||
if len(addrs) == 0 {
|
||||
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
|
||||
return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
|
||||
}
|
||||
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %w", err)
|
||||
return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %w", err)
|
||||
}
|
||||
return directAddrInfos, nil
|
||||
}
|
||||
|
||||
@@ -202,12 +202,13 @@ func (s *Service) Start() {
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
err = s.connectToBootnodes()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not add bootnode to the exclusion list")
|
||||
|
||||
if err := s.connectToBootnodes(); err != nil {
|
||||
log.WithError(err).Error("Could not connect to boot nodes")
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
|
||||
s.dv5Listener = listener
|
||||
go s.listenForNewNodes()
|
||||
}
|
||||
@@ -384,12 +385,17 @@ func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) er
|
||||
s.pingMethodLock.Unlock()
|
||||
}
|
||||
|
||||
func (s *Service) pingPeers() {
|
||||
func (s *Service) pingPeersAndLogEnr() {
|
||||
s.pingMethodLock.RLock()
|
||||
defer s.pingMethodLock.RUnlock()
|
||||
|
||||
localENR := s.dv5Listener.Self()
|
||||
log.WithField("ENR", localENR).Info("New node record")
|
||||
|
||||
if s.pingMethod == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, pid := range s.peers.Connected() {
|
||||
go func(id peer.ID) {
|
||||
if err := s.pingMethod(s.ctx, id); err != nil {
|
||||
@@ -462,8 +468,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
|
||||
if info.ID == s.host.ID() {
|
||||
return nil
|
||||
}
|
||||
if s.Peers().IsBad(info.ID) {
|
||||
return errors.New("refused to connect to bad peer")
|
||||
if err := s.Peers().IsBad(info.ID); err != nil {
|
||||
return errors.Wrap(err, "refused to connect to bad peer")
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
|
||||
defer cancel()
|
||||
|
||||
@@ -27,148 +27,148 @@ func NewFuzzTestP2P() *FakeP2P {
|
||||
}
|
||||
|
||||
// Encoding -- fake.
|
||||
func (_ *FakeP2P) Encoding() encoder.NetworkEncoding {
|
||||
func (*FakeP2P) Encoding() encoder.NetworkEncoding {
|
||||
return &encoder.SszNetworkEncoder{}
|
||||
}
|
||||
|
||||
// AddConnectionHandler -- fake.
|
||||
func (_ *FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) {
|
||||
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler -- fake.
|
||||
func (_ *FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
}
|
||||
|
||||
// AddPingMethod -- fake.
|
||||
func (_ *FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
|
||||
}
|
||||
|
||||
// PeerID -- fake.
|
||||
func (_ *FakeP2P) PeerID() peer.ID {
|
||||
func (*FakeP2P) PeerID() peer.ID {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (_ *FakeP2P) ENR() *enr.Record {
|
||||
func (*FakeP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
}
|
||||
|
||||
// DiscoveryAddresses -- fake
|
||||
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet mocks the p2p func.
|
||||
func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (_ *FakeP2P) RefreshENR() {}
|
||||
func (*FakeP2P) RefreshENR() {}
|
||||
|
||||
// LeaveTopic -- fake.
|
||||
func (_ *FakeP2P) LeaveTopic(_ string) error {
|
||||
func (*FakeP2P) LeaveTopic(_ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Metadata -- fake.
|
||||
func (_ *FakeP2P) Metadata() metadata.Metadata {
|
||||
func (*FakeP2P) Metadata() metadata.Metadata {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Peers -- fake.
|
||||
func (_ *FakeP2P) Peers() *peers.Status {
|
||||
func (*FakeP2P) Peers() *peers.Status {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublishToTopic -- fake.
|
||||
func (_ *FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send -- fake.
|
||||
func (_ *FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
|
||||
func (*FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// PubSub -- fake.
|
||||
func (_ *FakeP2P) PubSub() *pubsub.PubSub {
|
||||
func (*FakeP2P) PubSub() *pubsub.PubSub {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MetadataSeq -- fake.
|
||||
func (_ *FakeP2P) MetadataSeq() uint64 {
|
||||
func (*FakeP2P) MetadataSeq() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// SetStreamHandler -- fake.
|
||||
func (_ *FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {
|
||||
func (*FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {
|
||||
|
||||
}
|
||||
|
||||
// SubscribeToTopic -- fake.
|
||||
func (_ *FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
|
||||
func (*FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// JoinTopic -- fake.
|
||||
func (_ *FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
|
||||
func (*FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Host -- fake.
|
||||
func (_ *FakeP2P) Host() host.Host {
|
||||
func (*FakeP2P) Host() host.Host {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disconnect -- fake.
|
||||
func (_ *FakeP2P) Disconnect(_ peer.ID) error {
|
||||
func (*FakeP2P) Disconnect(_ peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Broadcast -- fake.
|
||||
func (_ *FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
func (*FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastAttestation -- fake.
|
||||
func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
|
||||
func (*FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage -- fake.
|
||||
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
func (*FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob -- fake.
|
||||
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
|
||||
func (*FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InterceptPeerDial -- fake.
|
||||
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
func (*FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAddrDial -- fake.
|
||||
func (_ *FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
func (*FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAccept -- fake.
|
||||
func (_ *FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
func (*FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptSecured -- fake.
|
||||
func (_ *FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
func (*FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptUpgraded -- fake.
|
||||
func (_ *FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
return true, 0
|
||||
}
|
||||
|
||||
@@ -18,12 +18,12 @@ type MockHost struct {
|
||||
}
|
||||
|
||||
// ID --
|
||||
func (_ *MockHost) ID() peer.ID {
|
||||
func (*MockHost) ID() peer.ID {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Peerstore --
|
||||
func (_ *MockHost) Peerstore() peerstore.Peerstore {
|
||||
func (*MockHost) Peerstore() peerstore.Peerstore {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -33,46 +33,46 @@ func (m *MockHost) Addrs() []ma.Multiaddr {
|
||||
}
|
||||
|
||||
// Network --
|
||||
func (_ *MockHost) Network() network.Network {
|
||||
func (*MockHost) Network() network.Network {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mux --
|
||||
func (_ *MockHost) Mux() protocol.Switch {
|
||||
func (*MockHost) Mux() protocol.Switch {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connect --
|
||||
func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
|
||||
func (*MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStreamHandler --
|
||||
func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
|
||||
func (*MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
|
||||
|
||||
// SetStreamHandlerMatch --
|
||||
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
|
||||
func (*MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
|
||||
}
|
||||
|
||||
// RemoveStreamHandler --
|
||||
func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {}
|
||||
func (*MockHost) RemoveStreamHandler(_ protocol.ID) {}
|
||||
|
||||
// NewStream --
|
||||
func (_ *MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) {
|
||||
func (*MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Close --
|
||||
func (_ *MockHost) Close() error {
|
||||
func (*MockHost) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnManager --
|
||||
func (_ *MockHost) ConnManager() connmgr.ConnManager {
|
||||
func (*MockHost) ConnManager() connmgr.ConnManager {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EventBus --
|
||||
func (_ *MockHost) EventBus() event.Bus {
|
||||
func (*MockHost) EventBus() event.Bus {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ type MockPeerManager struct {
|
||||
}
|
||||
|
||||
// Disconnect .
|
||||
func (_ *MockPeerManager) Disconnect(peer.ID) error {
|
||||
func (*MockPeerManager) Disconnect(peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -35,12 +35,12 @@ func (m *MockPeerManager) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR .
|
||||
func (m MockPeerManager) ENR() *enr.Record {
|
||||
func (m *MockPeerManager) ENR() *enr.Record {
|
||||
return m.Enr
|
||||
}
|
||||
|
||||
// DiscoveryAddresses .
|
||||
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if m.FailDiscoveryAddr {
|
||||
return nil, errors.New("fail")
|
||||
}
|
||||
@@ -48,12 +48,12 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
}
|
||||
|
||||
// RefreshENR .
|
||||
func (_ MockPeerManager) RefreshENR() {}
|
||||
func (*MockPeerManager) RefreshENR() {}
|
||||
|
||||
// FindPeersWithSubnet .
|
||||
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// AddPingMethod .
|
||||
func (_ MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
func (*MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
|
||||
@@ -64,7 +64,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
log.WithError(err).Debug("Cannot decode")
|
||||
}
|
||||
m.peers.Add(createENR(), id0, ma0, network.DirInbound)
|
||||
m.peers.SetConnectionState(id0, peers.PeerConnected)
|
||||
m.peers.SetConnectionState(id0, peers.Connected)
|
||||
m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
|
||||
id1, err := peer.Decode(MockRawPeerId1)
|
||||
if err != nil {
|
||||
@@ -75,7 +75,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
log.WithError(err).Debug("Cannot decode")
|
||||
}
|
||||
m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
|
||||
m.peers.SetConnectionState(id1, peers.PeerConnected)
|
||||
m.peers.SetConnectionState(id1, peers.Connected)
|
||||
m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
|
||||
}
|
||||
return m.peers
|
||||
|
||||
@@ -239,7 +239,7 @@ func (p *TestP2P) LeaveTopic(topic string) error {
|
||||
}
|
||||
|
||||
// Encoding returns ssz encoding.
|
||||
func (_ *TestP2P) Encoding() encoder.NetworkEncoding {
|
||||
func (*TestP2P) Encoding() encoder.NetworkEncoding {
|
||||
return &encoder.SszNetworkEncoder{}
|
||||
}
|
||||
|
||||
@@ -266,12 +266,12 @@ func (p *TestP2P) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (_ *TestP2P) ENR() *enr.Record {
|
||||
func (*TestP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
}
|
||||
|
||||
// DiscoveryAddresses --
|
||||
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -284,16 +284,16 @@ func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID
|
||||
p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction)
|
||||
ctx := context.Background()
|
||||
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
|
||||
if err := f(ctx, conn.RemotePeer()); err != nil {
|
||||
logrus.WithError(err).Error("Could not send successful hello rpc request")
|
||||
if err := p.Disconnect(conn.RemotePeer()); err != nil {
|
||||
logrus.WithError(err).Errorf("Unable to close peer %s", conn.RemotePeer())
|
||||
}
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnected)
|
||||
return
|
||||
}
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
|
||||
}()
|
||||
},
|
||||
})
|
||||
@@ -305,11 +305,11 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID
|
||||
DisconnectedF: func(net network.Network, conn network.Conn) {
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnecting)
|
||||
if err := f(context.Background(), conn.RemotePeer()); err != nil {
|
||||
logrus.WithError(err).Debug("Unable to invoke callback")
|
||||
}
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
|
||||
p.peers.SetConnectionState(conn.RemotePeer(), peers.Disconnected)
|
||||
}()
|
||||
},
|
||||
})
|
||||
@@ -353,7 +353,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
|
||||
}
|
||||
|
||||
// Started always returns true.
|
||||
func (_ *TestP2P) Started() bool {
|
||||
func (*TestP2P) Started() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -363,12 +363,12 @@ func (p *TestP2P) Peers() *peers.Status {
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet mocks the p2p func.
|
||||
func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (_ *TestP2P) RefreshENR() {}
|
||||
func (*TestP2P) RefreshENR() {}
|
||||
|
||||
// ForkDigest mocks the p2p func.
|
||||
func (p *TestP2P) ForkDigest() ([4]byte, error) {
|
||||
@@ -386,31 +386,31 @@ func (p *TestP2P) MetadataSeq() uint64 {
|
||||
}
|
||||
|
||||
// AddPingMethod mocks the p2p func.
|
||||
func (_ *TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// InterceptPeerDial .
|
||||
func (_ *TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
func (*TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAddrDial .
|
||||
func (_ *TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
func (*TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAccept .
|
||||
func (_ *TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
func (*TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptSecured .
|
||||
func (_ *TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
func (*TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptUpgraded .
|
||||
func (_ *TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
return true, 0
|
||||
}
|
||||
|
||||
@@ -12,10 +12,15 @@ import (
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
gCrypto "github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
@@ -62,6 +67,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
	}

	if defaultKeysExist {
		log.WithField("filePath", defaultKeyPath).Info("Reading static P2P private key from a file. To generate a new random private key at every start, please remove this file.")
		return privKeyFromFile(defaultKeyPath)
	}

@@ -71,8 +77,8 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
		return nil, err
	}

	// If the StaticPeerID flag is not set, return the private key.
	if !cfg.StaticPeerID {
	// If the StaticPeerID flag is not set and if peerDAS is not enabled, return the private key.
	if !(cfg.StaticPeerID || params.PeerDASEnabled()) {
		return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
	}

@@ -89,7 +95,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
		return nil, err
	}

	log.Info("Wrote network key to file")
	log.WithField("path", defaultKeyPath).Info("Wrote network key to file")
	// Read the key from the defaultKeyPath file just written
	// for the strongest guarantee that the next start will be the same as this one.
	return privKeyFromFile(defaultKeyPath)
@@ -173,3 +179,27 @@ func verifyConnectivity(addr string, port uint, protocol string) {
		}
	}
}

// ConvertPeerIDToNodeID converts a peer ID (libp2p) to a node ID (devp2p).
func ConvertPeerIDToNodeID(pid peer.ID) (enode.ID, error) {
	// Retrieve the public key object of the peer under "crypto" form.
	pubkeyObjCrypto, err := pid.ExtractPublicKey()
	if err != nil {
		return [32]byte{}, errors.Wrapf(err, "extract public key from peer ID `%s`", pid)
	}

	// Extract the bytes representation of the public key.
	compressedPubKeyBytes, err := pubkeyObjCrypto.Raw()
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "public key raw")
	}

	// Retrieve the public key object of the peer under "SECP256K1" form.
	pubKeyObjSecp256k1, err := btcec.ParsePubKey(compressedPubKeyBytes)
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "parse public key")
	}

	newPubkey := &ecdsa.PublicKey{Curve: gCrypto.S256(), X: pubKeyObjSecp256k1.X(), Y: pubKeyObjSecp256k1.Y()}
	return enode.PubkeyToIDV4(newPubkey), nil
}
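As a quick illustration of the helper added above, the sketch below converts a libp2p peer ID string into a devp2p node ID. It is a hypothetical caller, not part of this diff; it assumes the function lives in the beacon-chain/p2p package, as the surrounding hunks suggest, and uses only peer.Decode from go-libp2p plus the new ConvertPeerIDToNodeID.

package main

import (
	"fmt"
	"log"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
)

func main() {
	// Any valid libp2p peer ID string works here; this one is taken from the test below.
	pid, err := peer.Decode("16Uiu2HAmRrhnqEfybLYimCiAYer2AtZKDGamQrL1VwRCyeh2YiFc")
	if err != nil {
		log.Fatal(err)
	}

	// Derive the devp2p node ID from the secp256k1 key embedded in the peer ID.
	nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(nodeID.String())
}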
@@ -6,6 +6,7 @@ import (

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -64,3 +65,19 @@ func TestSerializeENR(t *testing.T) {
		assert.ErrorContains(t, "could not serialize nil record", err)
	})
}

func TestConvertPeerIDToNodeID(t *testing.T) {
	const (
		peerIDStr         = "16Uiu2HAmRrhnqEfybLYimCiAYer2AtZKDGamQrL1VwRCyeh2YiFc"
		expectedNodeIDStr = "eed26c5d2425ab95f57246a5dca87317c41cacee4bcafe8bbe57e5965527c290"
	)

	peerID, err := peer.Decode(peerIDStr)
	require.NoError(t, err)

	actualNodeID, err := ConvertPeerIDToNodeID(peerID)
	require.NoError(t, err)

	actualNodeIDStr := actualNodeID.String()
	require.Equal(t, expectedNodeIDStr, actualNodeIDStr)
}
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "endpoints.go",
        "log.go",
        "metrics.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc",

@@ -34,18 +34,41 @@ type endpoint struct {
	methods []string
}

// responseWriter wraps the underlying http.ResponseWriter to capture the status code written by a handler.
type responseWriter struct {
	http.ResponseWriter
	statusCode int
}

// WriteHeader wraps the WriteHeader method of the underlying http.ResponseWriter to capture the status code.
// See the ResponseWriter documentation: https://pkg.go.dev/net/http@go1.23.3#ResponseWriter.
func (w *responseWriter) WriteHeader(statusCode int) {
	w.statusCode = statusCode
	w.ResponseWriter.WriteHeader(statusCode)
}

func (e *endpoint) handlerWithMiddleware() http.HandlerFunc {
	handler := http.Handler(e.handler)
	for _, m := range e.middleware {
		handler = m(handler)
	}
	return promhttp.InstrumentHandlerDuration(
	handler = promhttp.InstrumentHandlerDuration(
		httpRequestLatency.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
		promhttp.InstrumentHandlerCounter(
			httpRequestCount.MustCurryWith(prometheus.Labels{"endpoint": e.name}),
			handler,
		),
	)

	return func(w http.ResponseWriter, r *http.Request) {
		rw := &responseWriter{ResponseWriter: w, statusCode: http.StatusOK}
		handler.ServeHTTP(rw, r)

		if rw.statusCode >= 400 {
			httpErrorCount.WithLabelValues(r.URL.Path, http.StatusText(rw.statusCode), r.Method).Inc()
		}
	}
}

func (s *Service) endpoints(
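To make the wiring above easier to follow, here is a minimal, self-contained sketch of the same pattern: a wrapper that records the status code written by a handler and a Prometheus counter that is incremented only for error responses. The metric and function names here are illustrative stand-ins, not the ones registered in metrics.go.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

// statusRecorder mirrors the responseWriter wrapper above: it embeds the
// real http.ResponseWriter and remembers the last status code written.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// errorCounter is a stand-in for the httpErrorCount metric referenced in the diff.
var errorCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "http_error_count_example", Help: "Errors returned by HTTP handlers."},
	[]string{"path", "status", "method"},
)

// withErrorMetric wraps a handler and counts responses with status >= 400.
func withErrorMetric(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		next.ServeHTTP(rec, r)
		if rec.status >= 400 {
			errorCounter.WithLabelValues(r.URL.Path, http.StatusText(rec.status), r.Method).Inc()
		}
	})
}

func main() {
	prometheus.MustRegister(errorCounter)
	http.Handle("/demo", withErrorMetric(http.NotFoundHandler()))
	_ = http.ListenAndServe(":8080", nil)
}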
@@ -6,6 +6,7 @@ go_library(
|
||||
"handlers.go",
|
||||
"handlers_pool.go",
|
||||
"handlers_state.go",
|
||||
"handlers_validation.go",
|
||||
"handlers_validator.go",
|
||||
"log.go",
|
||||
"server.go",
|
||||
@@ -58,6 +59,7 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
corehelpers "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
|
||||
@@ -1029,63 +1028,6 @@ func unmarshalStrict(data []byte, v interface{}) error {
|
||||
return dec.Decode(v)
|
||||
}
|
||||
|
||||
func (s *Server) validateBroadcast(ctx context.Context, r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
|
||||
switch r.URL.Query().Get(broadcastValidationQueryParam) {
|
||||
case broadcastValidationConsensus:
|
||||
b, err := blocks.NewSignedBeaconBlock(blk.Block)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not create signed beacon block")
|
||||
}
|
||||
if err = s.validateConsensus(ctx, b); err != nil {
|
||||
return errors.Wrap(err, "consensus validation failed")
|
||||
}
|
||||
case broadcastValidationConsensusAndEquivocation:
|
||||
b, err := blocks.NewSignedBeaconBlock(blk.Block)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not create signed beacon block")
|
||||
}
|
||||
if err = s.validateConsensus(r.Context(), b); err != nil {
|
||||
return errors.Wrap(err, "consensus validation failed")
|
||||
}
|
||||
if err = s.validateEquivocation(b.Block()); err != nil {
|
||||
return errors.Wrap(err, "equivocation validation failed")
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
parentBlockRoot := blk.Block().ParentRoot()
|
||||
parentBlock, err := s.Blocker.Block(ctx, parentBlockRoot[:])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get parent block")
|
||||
}
|
||||
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
return errors.Wrap(err, "could not validate block")
|
||||
}
|
||||
|
||||
parentStateRoot := parentBlock.Block().StateRoot()
|
||||
parentState, err := s.Stater.State(ctx, parentStateRoot[:])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get parent state")
|
||||
}
|
||||
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
|
||||
return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBlockRoot retrieves the root of a block.
|
||||
func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
|
||||
|
||||
@@ -87,7 +87,7 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
|
||||
// ListAttestationsV2 retrieves attestations known by the node but
|
||||
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
|
||||
func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
|
||||
_, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
rawSlot, slot, ok := shared.UintFromQuery(w, r, "slot", false)
|
||||
@@ -98,13 +98,10 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
v := slots.ToForkVersion(primitives.Slot(slot))
|
||||
if rawSlot == "" {
|
||||
v = slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
|
||||
}
|
||||
|
||||
attestations := s.AttestationsPool.AggregatedAttestations()
|
||||
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
|
||||
if err != nil {
|
||||
@@ -116,7 +113,7 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
filteredAtts := make([]interface{}, 0, len(attestations))
|
||||
for _, att := range attestations {
|
||||
var includeAttestation bool
|
||||
if headState.Version() >= version.Electra {
|
||||
if v >= version.Electra && att.Version() >= version.Electra {
|
||||
attElectra, ok := att.(*eth.AttestationElectra)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
|
||||
@@ -128,7 +125,7 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
attStruct := structs.AttElectraFromConsensus(attElectra)
|
||||
filteredAtts = append(filteredAtts, attStruct)
|
||||
}
|
||||
} else {
|
||||
} else if v < version.Electra && att.Version() < version.Electra {
|
||||
attOld, ok := att.(*eth.Attestation)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
|
||||
@@ -149,9 +146,9 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
|
||||
w.Header().Set(api.VersionHeader, version.String(v))
|
||||
httputil.WriteJson(w, &structs.ListAttestationsResponse{
|
||||
Version: version.String(headState.Version()),
|
||||
Version: version.String(v),
|
||||
Data: attsData,
|
||||
})
|
||||
}
|
||||
@@ -726,31 +723,33 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetAttesterSlashingsV2")
|
||||
defer span.End()
|
||||
|
||||
v := slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
var attStructs []interface{}
|
||||
sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
|
||||
|
||||
for _, slashing := range sourceSlashings {
|
||||
var attStruct interface{}
|
||||
if headState.Version() >= version.Electra {
|
||||
if v >= version.Electra && slashing.Version() >= version.Electra {
|
||||
a, ok := slashing.(*eth.AttesterSlashingElectra)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to an Electra slashing", slashing), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
attStruct = structs.AttesterSlashingElectraFromConsensus(a)
|
||||
} else {
|
||||
} else if v < version.Electra && slashing.Version() < version.Electra {
|
||||
a, ok := slashing.(*eth.AttesterSlashing)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to a Phase0 slashing", slashing), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
attStruct = structs.AttesterSlashingFromConsensus(a)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
attStructs = append(attStructs, attStruct)
|
||||
}
|
||||
@@ -762,10 +761,10 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
resp := &structs.GetAttesterSlashingsResponse{
|
||||
Version: version.String(headState.Version()),
|
||||
Version: version.String(v),
|
||||
Data: attBytes,
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
|
||||
w.Header().Set(api.VersionHeader, version.String(v))
|
||||
httputil.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
|
||||
@@ -115,9 +115,16 @@ func TestListAttestations(t *testing.T) {
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
}
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
}
|
||||
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
|
||||
|
||||
@@ -204,10 +211,19 @@ func TestListAttestations(t *testing.T) {
|
||||
t.Run("Pre-Electra", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
}
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.DenebForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
|
||||
t.Run("empty request", func(t *testing.T) {
|
||||
@@ -226,7 +242,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 4, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
})
|
||||
t.Run("slot request", func(t *testing.T) {
|
||||
url := "http://example.com?slot=2"
|
||||
@@ -244,7 +260,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "2", a.Data.Slot)
|
||||
}
|
||||
@@ -265,7 +281,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "4", a.Data.CommitteeIndex)
|
||||
}
|
||||
@@ -286,7 +302,7 @@ func TestListAttestations(t *testing.T) {
|
||||
var atts []*structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &atts))
|
||||
assert.Equal(t, 1, len(atts))
|
||||
assert.Equal(t, "phase0", resp.Version)
|
||||
assert.Equal(t, "deneb", resp.Version)
|
||||
for _, a := range atts {
|
||||
assert.Equal(t, "2", a.Data.Slot)
|
||||
assert.Equal(t, "4", a.Data.CommitteeIndex)
|
||||
@@ -370,12 +386,21 @@ func TestListAttestations(t *testing.T) {
|
||||
}
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
AttestationsPool: attestations.NewPool(),
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
}
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4}))
|
||||
// Added one pre electra attestation to ensure it is ignored.
|
||||
require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2, att1}))
|
||||
require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4, att3}))
|
||||
|
||||
t.Run("empty request", func(t *testing.T) {
|
||||
url := "http://example.com"
|
||||
@@ -1658,12 +1683,59 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
})
|
||||
})
|
||||
t.Run("V2", func(t *testing.T) {
|
||||
t.Run("post-electra-ok-1-pre-slashing", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 100
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra, slashing1PreElectra}},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetAttesterSlashingsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, "electra", resp.Version)
|
||||
|
||||
// Unmarshal resp.Data into a slice of slashings
|
||||
var slashings []*structs.AttesterSlashingElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &slashings))
|
||||
|
||||
ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, slashing1PostElectra, ss[0])
|
||||
require.DeepEqual(t, slashing2PostElectra, ss[1])
|
||||
})
|
||||
t.Run("post-electra-ok", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 100
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra}},
|
||||
}
|
||||
|
||||
@@ -1692,9 +1764,11 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
t.Run("pre-electra-ok", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
|
||||
}
|
||||
|
||||
@@ -1722,8 +1796,15 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 100
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &blockchainmock.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{}},
|
||||
}
|
||||
|
||||
|
||||
@@ -19,10 +19,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
rpctesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
|
||||
@@ -2910,69 +2908,6 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateConsensus(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
parentState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
|
||||
parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot())
|
||||
require.NoError(t, err)
|
||||
parentSbb, err := blocks.NewSignedBeaconBlock(parentBlock)
|
||||
require.NoError(t, err)
|
||||
st, err := transition.ExecuteStateTransition(ctx, parentState, parentSbb)
|
||||
require.NoError(t, err)
|
||||
block, err := util.GenerateFullBlock(st, privs, util.DefaultBlockGenConfig(), st.Slot())
|
||||
require.NoError(t, err)
|
||||
sbb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
parentRoot, err := parentSbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
server := &Server{
|
||||
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
|
||||
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
|
||||
}
|
||||
|
||||
require.NoError(t, server.validateConsensus(ctx, sbb))
|
||||
}
|
||||
|
||||
func TestValidateEquivocation(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(10))
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
|
||||
require.NoError(t, err)
|
||||
fc := doublylinkedtree.New()
|
||||
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
|
||||
server := &Server{
|
||||
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
|
||||
}
|
||||
blk.SetSlot(st.Slot() + 1)
|
||||
|
||||
require.NoError(t, server.validateEquivocation(blk.Block()))
|
||||
})
|
||||
t.Run("block already exists", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(10))
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
blk.SetSlot(st.Slot())
|
||||
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
|
||||
require.NoError(t, err)
|
||||
|
||||
fc := doublylinkedtree.New()
|
||||
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
|
||||
server := &Server{
|
||||
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
|
||||
}
|
||||
err = server.validateEquivocation(blk.Block())
|
||||
assert.ErrorContains(t, "already exists", err)
|
||||
require.ErrorIs(t, err, errEquivocatedBlock)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_GetBlockRoot(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
109  beacon-chain/rpc/eth/beacon/handlers_validation.go  Normal file
@@ -0,0 +1,109 @@
package beacon

import (
	"context"
	"net/http"

	"github.com/ethereum/go-ethereum/crypto/kzg4844"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

func (s *Server) validateBroadcast(ctx context.Context, r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
	switch r.URL.Query().Get(broadcastValidationQueryParam) {
	case broadcastValidationConsensus:
		if err := s.validateConsensus(ctx, blk); err != nil {
			return errors.Wrap(err, "consensus validation failed")
		}
	case broadcastValidationConsensusAndEquivocation:
		b, err := blocks.NewSignedBeaconBlock(blk.Block)
		if err != nil {
			return errors.Wrapf(err, "could not create signed beacon block")
		}
		if err = s.validateConsensus(r.Context(), blk); err != nil {
			return errors.Wrap(err, "consensus validation failed")
		}
		if err = s.validateEquivocation(b.Block()); err != nil {
			return errors.Wrap(err, "equivocation validation failed")
		}
	default:
		return nil
	}
	return nil
}

func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
	if blk.Version() < version.Deneb {
		return nil
	}
	kzgs, err := blk.Block().Body().BlobKzgCommitments()
	if err != nil {
		return errors.Wrap(err, "could not get blob kzg commitments")
	}
	if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
		return errors.New("number of blobs, proofs, and commitments do not match")
	}
	for i, blob := range blobs {
		if err := kzg4844.VerifyBlobProof(kzg4844.Blob(blob), kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
			return errors.Wrap(err, "could not verify blob proof")
		}
	}
	return nil
}

func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeaconBlock) error {
	blk, err := blocks.NewSignedBeaconBlock(b.Block)
	if err != nil {
		return errors.Wrapf(err, "could not create signed beacon block")
	}

	parentBlockRoot := blk.Block().ParentRoot()
	parentBlock, err := s.Blocker.Block(ctx, parentBlockRoot[:])
	if err != nil {
		return errors.Wrap(err, "could not get parent block")
	}

	if err := blocks.BeaconBlockIsNil(blk); err != nil {
		return errors.Wrap(err, "could not validate block")
	}

	parentStateRoot := parentBlock.Block().StateRoot()
	parentState, err := s.Stater.State(ctx, parentStateRoot[:])
	if err != nil {
		return errors.Wrap(err, "could not get parent state")
	}
	_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
	if err != nil {
		return errors.Wrap(err, "could not execute state transition")
	}

	var blobs [][]byte
	var proofs [][]byte
	switch {
	case blk.Version() == version.Electra:
		blobs = b.GetElectra().Blobs
		proofs = b.GetElectra().KzgProofs
	case blk.Version() == version.Deneb:
		blobs = b.GetDeneb().Blobs
		proofs = b.GetDeneb().KzgProofs
	default:
		return nil
	}

	if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
		return err
	}

	return nil
}

func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
	if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
		return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
	}
	return nil
}
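The validation helpers moved into this new file are driven by the Beacon API's broadcast_validation query parameter on the block publish endpoints, so a client opts into the extra checks when submitting a block. The Go sketch below is a hedged illustration of that flow from the client side; the node URL, port, and JSON payload are placeholders, and the endpoint path follows the standard publishBlockV2 route rather than anything specific to this diff.

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder beacon node URL and signed block payload; both are illustrative only.
	url := "http://localhost:3500/eth/v2/beacon/blocks?broadcast_validation=consensus_and_equivocation"
	payload := bytes.NewBufferString(`{"message": "...signed block JSON..."}`)

	req, err := http.NewRequest(http.MethodPost, url, payload)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")

	// With consensus_and_equivocation the server runs validateConsensus
	// (including the blob proof checks above) and validateEquivocation
	// before broadcasting; omitting the parameter skips both.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}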
85  beacon-chain/rpc/eth/beacon/handlers_validation_test.go  Normal file
@@ -0,0 +1,85 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestValidateConsensus(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
parentState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
|
||||
parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot())
|
||||
require.NoError(t, err)
|
||||
parentSbb, err := blocks.NewSignedBeaconBlock(parentBlock)
|
||||
require.NoError(t, err)
|
||||
st, err := transition.ExecuteStateTransition(ctx, parentState, parentSbb)
|
||||
require.NoError(t, err)
|
||||
block, err := util.GenerateFullBlock(st, privs, util.DefaultBlockGenConfig(), st.Slot())
|
||||
require.NoError(t, err)
|
||||
parentRoot, err := parentSbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
server := &Server{
|
||||
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
|
||||
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
|
||||
}
|
||||
|
||||
require.NoError(t, server.validateConsensus(ctx, ð.GenericSignedBeaconBlock{
|
||||
Block: ð.GenericSignedBeaconBlock_Phase0{
|
||||
Phase0: block,
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
func TestValidateEquivocation(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(10))
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
|
||||
require.NoError(t, err)
|
||||
fc := doublylinkedtree.New()
|
||||
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
|
||||
server := &Server{
|
||||
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
|
||||
}
|
||||
blk.SetSlot(st.Slot() + 1)
|
||||
|
||||
require.NoError(t, server.validateEquivocation(blk.Block()))
|
||||
})
|
||||
t.Run("block already exists", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(10))
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
blk.SetSlot(st.Slot())
|
||||
roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
|
||||
require.NoError(t, err)
|
||||
|
||||
fc := doublylinkedtree.New()
|
||||
require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
|
||||
server := &Server{
|
||||
ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
|
||||
}
|
||||
err = server.validateEquivocation(blk.Block())
|
||||
assert.ErrorContains(t, "already exists", err)
|
||||
require.ErrorIs(t, err, errEquivocatedBlock)
|
||||
})
|
||||
}
|
||||
@@ -117,13 +117,13 @@ func TestGetPeers(t *testing.T) {
|
||||
|
||||
switch i {
|
||||
case 0, 1:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Connecting)
|
||||
case 2, 3:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(id, peers.Connected)
|
||||
case 4, 5:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnecting)
|
||||
case 6, 7:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnected)
|
||||
default:
|
||||
t.Fatalf("Failed to set connection state for peer")
|
||||
}
|
||||
@@ -289,13 +289,13 @@ func TestGetPeerCount(t *testing.T) {
|
||||
|
||||
switch i {
|
||||
case 0:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Connecting)
|
||||
case 1, 2:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(id, peers.Connected)
|
||||
case 3, 4, 5:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnecting)
|
||||
case 6, 7, 8, 9:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnected)
|
||||
default:
|
||||
t.Fatalf("Failed to set connection state for peer")
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@ func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
// GetAggregateAttestationV2 aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
|
||||
func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestationV2")
|
||||
_, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestationV2")
|
||||
defer span.End()
|
||||
|
||||
_, attDataRoot, ok := shared.HexFromQuery(w, r, "attestation_data_root", fieldparams.RootLength, true)
|
||||
@@ -91,14 +91,15 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
|
||||
return
|
||||
}
|
||||
|
||||
v := slots.ToForkVersion(primitives.Slot(slot))
|
||||
agg := s.aggregatedAttestation(w, primitives.Slot(slot), attDataRoot, primitives.CommitteeIndex(index))
|
||||
if agg == nil {
|
||||
return
|
||||
}
|
||||
resp := &structs.AggregateAttestationResponse{
|
||||
Version: version.String(agg.Version()),
|
||||
Version: version.String(v),
|
||||
}
|
||||
if agg.Version() >= version.Electra {
|
||||
if v >= version.Electra {
|
||||
typedAgg, ok := agg.(*ethpbalpha.AttestationElectra)
|
||||
if !ok {
|
||||
httputil.HandleError(w, fmt.Sprintf("Attestation is not of type %T", ðpbalpha.AttestationElectra{}), http.StatusInternalServerError)
|
||||
@@ -123,12 +124,7 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
|
||||
}
|
||||
resp.Data = data
|
||||
}
|
||||
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
|
||||
w.Header().Set(api.VersionHeader, version.String(v))
|
||||
httputil.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
@@ -179,27 +175,37 @@ func matchingAtts(atts []ethpbalpha.Att, slot primitives.Slot, attDataRoot []byt
		return []ethpbalpha.Att{}, nil
	}

	postElectra := atts[0].Version() >= version.Electra
	postElectra := slots.ToForkVersion(slot) >= version.Electra
	result := make([]ethpbalpha.Att, 0)
	for _, att := range atts {
		if att.GetData().Slot != slot {
			continue
		}

		// We ignore the committee index from the request before Electra.
		// This is because before Electra the committee index is part of the attestation data,
		// meaning that comparing the data root is sufficient.
		// Post-Electra the committee index in the data root is always 0, so we need to
		// compare the committee index separately.
		if postElectra {
			ci, err := att.GetCommitteeIndex()
			if err != nil {
				return nil, err
			if att.Version() >= version.Electra {
				ci, err := att.GetCommitteeIndex()
				if err != nil {
					return nil, err
				}
				if ci != index {
					continue
				}
			} else {
				continue
			}
			if ci != index {
		} else {
			// If postElectra is false and att.Version >= version.Electra, ignore the attestation.
			if att.Version() >= version.Electra {
				continue
			}
		}

		root, err := att.GetData().HashTreeRoot()
		if err != nil {
			return nil, errors.Wrap(err, "could not get attestation data root")
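The common thread in these handler changes is that the fork is now derived from the requested slot (or the current slot) via slots.ToForkVersion rather than from the head state. The sketch below distills that selection logic; attestationContainerFor is a hypothetical helper name, and only the slots, version, and primitives packages already referenced in the hunks are assumed.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// attestationContainerFor shows how a handler can pick the response shape
// from the slot's fork instead of the head state's version.
func attestationContainerFor(slot primitives.Slot) string {
	if slots.ToForkVersion(slot) >= version.Electra {
		// Post-Electra: committee bits live on the attestation, so the
		// committee index is compared separately from the data root.
		return "AttestationElectra"
	}
	// Pre-Electra: the committee index is part of the attestation data,
	// so matching the data root alone is sufficient.
	return "Attestation"
}

func main() {
	fmt.Println(attestationContainerFor(primitives.Slot(0)))
}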
@@ -55,15 +55,33 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sig := key.Sign([]byte("sig"))
|
||||
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.Attestation {
|
||||
return ðpbalpha.Attestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, 1, 1, root),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
// It is important to use 0 as the index because that's the only way
|
||||
// pre and post-Electra attestations can both match,
|
||||
// which allows us to properly test that attestations from the
|
||||
// wrong fork are ignored.
|
||||
committeeIndex := uint64(0)
|
||||
|
||||
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.Attestation {
|
||||
return ðpbalpha.Attestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, primitives.CommitteeIndex(committeeIndex), root),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
|
||||
createAttestationElectra := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte) *ethpbalpha.AttestationElectra {
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(committeeIndex, true)
|
||||
|
||||
return ðpbalpha.AttestationElectra{
|
||||
CommitteeBits: committeeBits,
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, primitives.CommitteeIndex(committeeIndex), root),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2)
|
||||
@@ -84,7 +102,7 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "1", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
@@ -206,181 +224,288 @@ func TestGetAggregateAttestation(t *testing.T) {
|
||||
})
|
||||
})
|
||||
t.Run("V2", func(t *testing.T) {
|
||||
createAttestation := func(slot primitives.Slot, aggregationBits bitfield.Bitlist, root []byte, bits uint64) *ethpbalpha.AttestationElectra {
|
||||
t.Run("pre-electra", func(t *testing.T) {
|
||||
committeeBits := bitfield.NewBitvector64()
|
||||
committeeBits.SetBitAt(bits, true)
|
||||
committeeBits.SetBitAt(1, true)
|
||||
|
||||
return ðpbalpha.AttestationElectra{
|
||||
CommitteeBits: committeeBits,
|
||||
AggregationBits: aggregationBits,
|
||||
Data: createAttestationData(slot, 0, 1, root),
|
||||
Signature: sig.Marshal(),
|
||||
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2)
|
||||
aggSlot2 := createAttestation(2, bitfield.Bitlist{0b11100}, root1)
|
||||
unaggSlot3_Root1_1 := createAttestation(3, bitfield.Bitlist{0b11000}, root1)
|
||||
unaggSlot3_Root1_2 := createAttestation(3, bitfield.Bitlist{0b10100}, root1)
|
||||
unaggSlot3_Root2 := createAttestation(3, bitfield.Bitlist{0b11000}, root2)
|
||||
unaggSlot4 := createAttestation(4, bitfield.Bitlist{0b11000}, root1)
|
||||
|
||||
// Add one post-electra attestation to ensure that it is being ignored.
|
||||
// We choose slot 2 where we have one pre-electra attestation with less attestation bits.
|
||||
postElectraAtt := createAttestationElectra(2, bitfield.Bitlist{0b11111}, root1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.Attestation,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
aggSlot1_Root1_1 := createAttestation(1, bitfield.Bitlist{0b11100}, root1, 1)
|
||||
aggSlot1_Root1_2 := createAttestation(1, bitfield.Bitlist{0b10111}, root1, 1)
|
||||
aggSlot1_Root2 := createAttestation(1, bitfield.Bitlist{0b11100}, root2, 1)
|
||||
aggSlot2 := createAttestation(2, bitfield.Bitlist{0b11100}, root1, 1)
|
||||
unaggSlot3_Root1_1 := createAttestation(3, bitfield.Bitlist{0b11000}, root1, 1)
|
||||
unaggSlot3_Root1_2 := createAttestation(3, bitfield.Bitlist{0b10100}, root1, 1)
|
||||
unaggSlot3_Root2 := createAttestation(3, bitfield.Bitlist{0b11000}, root2, 1)
|
||||
unaggSlot4 := createAttestation(4, bitfield.Bitlist{0b11000}, root1, 1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.AttestationElectra,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
expectedCommitteeBits string,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, expectedCommitteeBits, attestation.CommitteeBits)
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 4, len(agg), "Expected 4 aggregated attestations")
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &mockChain.ChainService{State: bs},
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot2.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot1_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("1 matching unaggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot4.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=4" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
compareResult(t, attestation, "4", hexutil.Encode(unaggSlot4.AggregationBits), root1, sig.Marshal(), hexutil.Encode(unaggSlot4.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching unaggregated attestations - their aggregate is returned", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot3_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3" + "&committee_index=1"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
sig1, err := bls.SignatureFromBytes(unaggSlot3_Root1_1.Signature)
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
sig2, err := bls.SignatureFromBytes(unaggSlot3_Root1_2.Signature)
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2, postElectraAtt}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 5, len(agg), "Expected 5 aggregated attestations, 4 pre electra and 1 post electra")
|
||||
s := &Server{
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal())
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.Attestation
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal())
|
||||
})
|
||||
})
|
||||
t.Run("post-electra", func(t *testing.T) {
|
||||
aggSlot1_Root1_1 := createAttestationElectra(1, bitfield.Bitlist{0b11100}, root1)
|
||||
aggSlot1_Root1_2 := createAttestationElectra(1, bitfield.Bitlist{0b10111}, root1)
|
||||
aggSlot1_Root2 := createAttestationElectra(1, bitfield.Bitlist{0b11100}, root2)
|
||||
aggSlot2 := createAttestationElectra(2, bitfield.Bitlist{0b11100}, root1)
|
||||
unaggSlot3_Root1_1 := createAttestationElectra(3, bitfield.Bitlist{0b11000}, root1)
|
||||
unaggSlot3_Root1_2 := createAttestationElectra(3, bitfield.Bitlist{0b10100}, root1)
|
||||
unaggSlot3_Root2 := createAttestationElectra(3, bitfield.Bitlist{0b11000}, root2)
|
||||
unaggSlot4 := createAttestationElectra(4, bitfield.Bitlist{0b11000}, root1)
|
||||
|
||||
// Add one pre-electra attestation to ensure that it is being ignored.
|
||||
// We choose slot 2 where we have one post-electra attestation with less attestation bits.
|
||||
preElectraAtt := createAttestation(2, bitfield.Bitlist{0b11111}, root1)
|
||||
|
||||
compareResult := func(
|
||||
t *testing.T,
|
||||
attestation structs.AttestationElectra,
|
||||
expectedSlot string,
|
||||
expectedAggregationBits string,
|
||||
expectedRoot []byte,
|
||||
expectedSig []byte,
|
||||
expectedCommitteeBits string,
|
||||
) {
|
||||
assert.Equal(t, expectedAggregationBits, attestation.AggregationBits, "Unexpected aggregation bits in attestation")
|
||||
assert.Equal(t, expectedCommitteeBits, attestation.CommitteeBits)
|
||||
assert.Equal(t, hexutil.Encode(expectedSig), attestation.Signature, "Signature mismatch")
|
||||
assert.Equal(t, expectedSlot, attestation.Data.Slot, "Slot mismatch in attestation data")
|
||||
assert.Equal(t, "0", attestation.Data.CommitteeIndex, "Committee index mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.BeaconBlockRoot, "Beacon block root mismatch")
|
||||
|
||||
// Source checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Source, "Source checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Source.Epoch, "Source epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Source.Root, "Source root mismatch")
|
||||
|
||||
// Target checkpoint checks
|
||||
require.NotNil(t, attestation.Data.Target, "Target checkpoint should not be nil")
|
||||
assert.Equal(t, "1", attestation.Data.Target.Epoch, "Target epoch mismatch")
|
||||
assert.Equal(t, hexutil.Encode(expectedRoot), attestation.Data.Target.Root, "Target root mismatch")
|
||||
}
|
||||
|
||||
pool := attestations.NewPool()
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpbalpha.Att{unaggSlot3_Root1_1, unaggSlot3_Root1_2, unaggSlot3_Root2, unaggSlot4}), "Failed to save unaggregated attestations")
|
||||
unagg, err := pool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(unagg), "Expected 4 unaggregated attestations")
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpbalpha.Att{aggSlot1_Root1_1, aggSlot1_Root1_2, aggSlot1_Root2, aggSlot2, preElectraAtt}), "Failed to save aggregated attestations")
|
||||
agg := pool.AggregatedAttestations()
|
||||
require.Equal(t, 5, len(agg), "Expected 5 aggregated attestations, 4 electra and 1 pre electra")
|
||||
bs, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.ElectraForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
chainService := &mockChain.ChainService{State: bs}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
AttestationsPool: pool,
|
||||
}
|
||||
t.Run("non-matching attestation request", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code, "Expected HTTP status NotFound for non-matching request")
|
||||
})
|
||||
t.Run("1 matching aggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot2.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=2" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "2", hexutil.Encode(aggSlot2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot2.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching aggregated attestations - return the one with most bits", func(t *testing.T) {
|
||||
reqRoot, err := aggSlot1_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=1" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
|
||||
compareResult(t, attestation, "1", hexutil.Encode(aggSlot1_Root1_2.AggregationBits), root1, sig.Marshal(), hexutil.Encode(aggSlot1_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("1 matching unaggregated attestation", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot4.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=4" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
compareResult(t, attestation, "4", hexutil.Encode(unaggSlot4.AggregationBits), root1, sig.Marshal(), hexutil.Encode(unaggSlot4.CommitteeBits))
|
||||
})
|
||||
t.Run("multiple matching unaggregated attestations - their aggregate is returned", func(t *testing.T) {
|
||||
reqRoot, err := unaggSlot3_Root1_1.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Failed to generate attestation data hash tree root")
|
||||
attDataRoot := hexutil.Encode(reqRoot[:])
|
||||
url := "http://example.com?attestation_data_root=" + attDataRoot + "&slot=3" + "&committee_index=0"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
s.GetAggregateAttestationV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code, "Expected HTTP status OK")
|
||||
|
||||
var resp structs.AggregateAttestationResponse
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp), "Failed to unmarshal response")
|
||||
require.NotNil(t, resp.Data, "Response data should not be nil")
|
||||
|
||||
var attestation structs.AttestationElectra
|
||||
require.NoError(t, json.Unmarshal(resp.Data, &attestation), "Failed to unmarshal attestation data")
|
||||
sig1, err := bls.SignatureFromBytes(unaggSlot3_Root1_1.Signature)
|
||||
require.NoError(t, err)
|
||||
sig2, err := bls.SignatureFromBytes(unaggSlot3_Root1_2.Signature)
|
||||
require.NoError(t, err)
|
||||
expectedSig := bls.AggregateSignatures([]common.Signature{sig1, sig2})
|
||||
compareResult(t, attestation, "3", hexutil.Encode(bitfield.Bitlist{0b11100}), root1, expectedSig.Marshal(), hexutil.Encode(unaggSlot3_Root1_1.CommitteeBits))
|
||||
})
|
||||
t.Run("pre-electra attestation is ignored", func(t *testing.T) {
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func createAttestationData(
|
||||
slot primitives.Slot,
|
||||
committeeIndex primitives.CommitteeIndex,
|
||||
epoch primitives.Epoch,
|
||||
root []byte,
|
||||
) *ethpbalpha.AttestationData {
|
||||
func createAttestationData(slot primitives.Slot, committeeIndex primitives.CommitteeIndex, root []byte) *ethpbalpha.AttestationData {
|
||||
return ðpbalpha.AttestationData{
|
||||
Slot: slot,
|
||||
CommitteeIndex: committeeIndex,
|
||||
BeaconBlockRoot: root,
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: epoch,
|
||||
Epoch: 1,
|
||||
Root: root,
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: epoch,
|
||||
Epoch: 1,
|
||||
Root: root,
|
||||
},
|
||||
}
|
||||
|
||||
31
beacon-chain/rpc/metrics.go
Normal file
31
beacon-chain/rpc/metrics.go
Normal file
@@ -0,0 +1,31 @@
package rpc

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	httpRequestLatency = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_latency_seconds",
			Help:    "Latency of HTTP requests in seconds",
			Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
		},
		[]string{"endpoint", "code", "method"},
	)
	httpRequestCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_request_count",
			Help: "Number of HTTP requests",
		},
		[]string{"endpoint", "code", "method"},
	)
	httpErrorCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_error_count",
			Help: "Total HTTP errors for beacon node requests",
		},
		[]string{"endpoint", "code", "method"},
	)
)
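The three collectors above are registered via promauto and labeled by endpoint, status code, and HTTP method. As a rough illustration of how they could be driven from an HTTP middleware, here is a minimal sketch; the statusRecorder type and instrumentHandler function are illustrative names only, not Prysm's actual interceptor.

// Same package as the metrics above.
package rpc

import (
	"net/http"
	"strconv"
	"time"
)

// statusRecorder captures the response code so it can be used as a metric label.
type statusRecorder struct {
	http.ResponseWriter
	code int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.code = code
	r.ResponseWriter.WriteHeader(code)
}

// instrumentHandler wraps an http.Handler and records latency, request count,
// and error count using the collectors defined above.
func instrumentHandler(endpoint string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, code: http.StatusOK}
		start := time.Now()
		next.ServeHTTP(rec, r)

		labels := []string{endpoint, strconv.Itoa(rec.code), r.Method}
		httpRequestLatency.WithLabelValues(labels...).Observe(time.Since(start).Seconds())
		httpRequestCount.WithLabelValues(labels...).Inc()
		if rec.code >= http.StatusBadRequest {
			httpErrorCount.WithLabelValues(labels...).Inc()
		}
	})
}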
@@ -25,8 +25,8 @@ import (
|
||||
|
||||
type testIdentity enode.ID
|
||||
|
||||
func (_ testIdentity) Verify(_ *enr.Record, _ []byte) error { return nil }
|
||||
func (id testIdentity) NodeAddr(_ *enr.Record) []byte { return id[:] }
|
||||
func (testIdentity) Verify(*enr.Record, []byte) error { return nil }
|
||||
func (id testIdentity) NodeAddr(*enr.Record) []byte { return id[:] }
|
||||
|
||||
func TestListTrustedPeer(t *testing.T) {
|
||||
ids := libp2ptest.GeneratePeerIDs(9)
|
||||
@@ -62,13 +62,13 @@ func TestListTrustedPeer(t *testing.T) {
|
||||
|
||||
switch i {
|
||||
case 0, 1:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Connecting)
|
||||
case 2, 3:
|
||||
peerStatus.SetConnectionState(id, peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(id, peers.Connected)
|
||||
case 4, 5:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnecting)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnecting)
|
||||
case 6, 7:
|
||||
peerStatus.SetConnectionState(id, peers.PeerDisconnected)
|
||||
peerStatus.SetConnectionState(id, peers.Disconnected)
|
||||
default:
|
||||
t.Fatalf("Failed to set connection state for peer")
|
||||
}
|
||||
|
||||
@@ -303,7 +303,7 @@ func (bs *Server) ListIndexedAttestationsElectra(
|
||||
// that it was included in a block. The attestation may have expired.
|
||||
// Refer to the ethereum consensus specification for more details on how
|
||||
// attestations are processed and when they are no longer valid.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/core/0_beacon-chain.md#attestations
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
|
||||
func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolResponse, error) {
|
||||
atts, err := attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
|
||||
if err != nil {
|
||||
|
||||
@@ -212,7 +212,9 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
eth_network = "minimal",
|
||||
tags = ["minimal"],
|
||||
deps = common_deps,
|
||||
deps = common_deps + [
|
||||
"//beacon-chain/operations/attestations/mock:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
||||
@@ -339,7 +339,7 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
|
||||
|
||||
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind sidecars failed")
|
||||
return nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
}
|
||||
|
||||
return copiedBlock, sidecars, nil
|
||||
|
||||
@@ -91,14 +91,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
|
||||
|
||||
var attsForInclusion proposerAtts
|
||||
if postElectra {
|
||||
// TODO: hack for Electra devnet-1, take only one aggregate per ID
|
||||
// (which essentially means one aggregate for an attestation_data+committee combination
|
||||
topAggregates := make([]ethpb.Att, 0)
|
||||
for _, v := range attsById {
|
||||
topAggregates = append(topAggregates, v[0])
|
||||
}
|
||||
|
||||
attsForInclusion, err = computeOnChainAggregate(topAggregates)
|
||||
attsForInclusion, err = onChainAggregates(attsById)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -113,14 +106,68 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sorted, err := deduped.sort()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
var sorted proposerAtts
|
||||
if postElectra {
|
||||
sorted, err = deduped.sortOnChainAggregates()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
sorted, err = deduped.sort()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
atts = sorted.limitToMaxAttestations()
|
||||
return vs.filterAttestationBySignature(ctx, atts, latestState)
|
||||
}
|
||||
|
||||
func onChainAggregates(attsById map[attestation.Id][]ethpb.Att) (proposerAtts, error) {
|
||||
var result proposerAtts
|
||||
var err error
|
||||
|
||||
// When constructing on-chain aggregates, we want to combine the most profitable
|
||||
// aggregate for each ID, then the second most profitable, and so on and so forth.
|
||||
// Because of this we sort attestations at the beginning.
|
||||
for id, as := range attsById {
|
||||
attsById[id], err = proposerAtts(as).sort()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// We construct the first on-chain aggregate by taking the first aggregate for each ID.
|
||||
// We construct the second on-chain aggregate by taking the second aggregate for each ID.
|
||||
// We continue doing this until we run out of aggregates.
|
||||
idx := 0
|
||||
for {
|
||||
topAggregates := make([]ethpb.Att, 0, len(attsById))
|
||||
for _, as := range attsById {
|
||||
// In case there are no more aggregates for an ID, we skip that ID.
|
||||
if len(as) > idx {
|
||||
topAggregates = append(topAggregates, as[idx])
|
||||
}
|
||||
}
|
||||
|
||||
// Once there are no more aggregates for any ID, we are done.
|
||||
if len(topAggregates) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
onChainAggs, err := computeOnChainAggregate(topAggregates)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, onChainAggs...)
|
||||
|
||||
idx++
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
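To make the round-robin construction above concrete, here is a toy, self-contained sketch using plain strings in place of real aggregates; the IDs and values are made up for illustration, and each batch stands in for one call to computeOnChainAggregate.

package main

import "fmt"

func main() {
	// Per-ID aggregates, already sorted most profitable first, as in onChainAggregates.
	aggregatesByID := map[string][]string{
		"id_A": {"A1", "A2", "A3"},
		"id_B": {"B1"},
		"id_C": {"C1", "C2"},
	}

	// Take the best aggregate per ID, then the second best per ID, and so on,
	// until every ID has run out of aggregates.
	for idx := 0; ; idx++ {
		batch := make([]string, 0, len(aggregatesByID))
		for _, as := range aggregatesByID {
			if len(as) > idx {
				batch = append(batch, as[idx])
			}
		}
		if len(batch) == 0 {
			break
		}
		// In the real code this batch would be passed to computeOnChainAggregate.
		fmt.Println("batch", idx, batch)
	}
}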
|
||||
|
||||
// filter separates attestation list into two groups: valid and invalid attestations.
|
||||
// The first group passes all the required checks for an attestation to be considered for proposing.
|
||||
// And attestations from the second group should be deleted.
|
||||
@@ -223,6 +270,14 @@ func (a proposerAtts) sort() (proposerAtts, error) {
|
||||
return a.sortBySlotAndCommittee()
|
||||
}
|
||||
|
||||
func (a proposerAtts) sortOnChainAggregates() (proposerAtts, error) {
|
||||
if len(a) < 2 {
|
||||
return a, nil
|
||||
}
|
||||
|
||||
return a.sortByProfitabilityUsingMaxCover()
|
||||
}
|
||||
|
||||
// Separate attestations by slot, as slot number takes higher precedence when sorting.
|
||||
// Also separate by committee index because maxcover will prefer attestations for the same
|
||||
// committee with disjoint bits over attestations for different committees with overlapping
|
||||
@@ -231,7 +286,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
|
||||
type slotAtts struct {
|
||||
candidates map[primitives.CommitteeIndex]proposerAtts
|
||||
selected map[primitives.CommitteeIndex]proposerAtts
|
||||
leftover map[primitives.CommitteeIndex]proposerAtts
|
||||
}
|
||||
|
||||
var slots []primitives.Slot
|
||||
@@ -250,7 +304,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
|
||||
var err error
|
||||
for _, sa := range attsBySlot {
|
||||
sa.selected = make(map[primitives.CommitteeIndex]proposerAtts)
|
||||
sa.leftover = make(map[primitives.CommitteeIndex]proposerAtts)
|
||||
for ci, committeeAtts := range sa.candidates {
|
||||
sa.selected[ci], err = committeeAtts.sortByProfitabilityUsingMaxCover_committeeAwarePacking()
|
||||
if err != nil {
|
||||
@@ -266,9 +319,6 @@ func (a proposerAtts) sortBySlotAndCommittee() (proposerAtts, error) {
|
||||
for _, slot := range slots {
|
||||
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].selected)...)
|
||||
}
|
||||
for _, slot := range slots {
|
||||
sortedAtts = append(sortedAtts, sortSlotAttestations(attsBySlot[slot].leftover)...)
|
||||
}
|
||||
|
||||
return sortedAtts, nil
|
||||
}
|
||||
@@ -287,15 +337,11 @@ func (a proposerAtts) sortByProfitabilityUsingMaxCover_committeeAwarePacking() (
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Add selected candidates on top, those that are not selected - append at bottom.
|
||||
selectedKeys, _, err := aggregation.MaxCover(candidates, len(candidates), true /* allowOverlaps */)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("MaxCover aggregation failed")
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Pick selected attestations first, leftover attestations will be appended at the end.
|
||||
// Both lists will be sorted by number of bits set.
|
||||
selected := make(proposerAtts, selectedKeys.Count())
|
||||
for i, key := range selectedKeys.BitIndices() {
|
||||
selected[i] = a[key]
|
||||
|
||||
@@ -13,6 +13,9 @@ import (
|
||||
// computeOnChainAggregate constructs a final aggregate from a list of network aggregates with equal attestation data.
|
||||
// It assumes that each network aggregate has exactly one committee bit set.
|
||||
//
|
||||
// Our implementation allows passing aggregates for different attestation data, in which case the function will return
|
||||
// one final aggregate per attestation data.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:
|
||||
|
||||
@@ -3,16 +3,21 @@ package validator
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -680,6 +685,212 @@ func Test_packAttestations(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func Test_packAttestations_ElectraOnChainAggregates(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.ElectraForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
key, err := blst.RandKey()
|
||||
require.NoError(t, err)
|
||||
sig := key.Sign([]byte{'X'})
|
||||
|
||||
cb0 := primitives.NewAttestationCommitteeBits()
|
||||
cb0.SetBitAt(0, true)
|
||||
cb1 := primitives.NewAttestationCommitteeBits()
|
||||
cb1.SetBitAt(1, true)
|
||||
|
||||
data0 := util.HydrateAttestationData(ðpb.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte{'0'}, 32)})
|
||||
data1 := util.HydrateAttestationData(ðpb.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte{'1'}, 32)})
|
||||
|
||||
// Glossary:
|
||||
// - Single Aggregate: aggregate with exactly one committee bit set, from which an On-Chain Aggregate is constructed
|
||||
// - On-Chain Aggregate: final aggregate packed into a block
|
||||
//
|
||||
// We construct the following number of single aggregates:
|
||||
// - data_root_0 and committee_index_0: 3 single aggregates
|
||||
// - data_root_0 and committee_index_1: 2 single aggregates
|
||||
// - data_root_1 and committee_index_0: 1 single aggregate
|
||||
// - data_root_1 and committee_index_1: 3 single aggregates
|
||||
//
|
||||
// Because the function tries to aggregate attestations, we have to create attestations which are not aggregatable
|
||||
// and are not redundant when using MaxCover.
|
||||
// The function should also sort attestations by ID before computing the On-Chain Aggregate, so we want unsorted aggregation bits
|
||||
// to test the sorting part.
|
||||
//
|
||||
// The result should be the following six on-chain aggregates:
|
||||
// - for data_root_0 combining the most profitable aggregate for each committee
|
||||
// - for data_root_0 combining the second most profitable aggregate for each committee
|
||||
// - for data_root_0 constructed from the single aggregate at index 2 for committee_index_0
|
||||
// - for data_root_1 combining the most profitable aggregate for each committee
|
||||
// - for data_root_1 constructed from the single aggregate at index 1 for committee_index_1
|
||||
// - for data_root_1 constructed from the single aggregate at index 2 for committee_index_1
|
||||
|
||||
d0_c0_a1 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1000011},
|
||||
CommitteeBits: cb0,
|
||||
Data: data0,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d0_c0_a2 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1100101},
|
||||
CommitteeBits: cb0,
|
||||
Data: data0,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d0_c0_a3 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1111000},
|
||||
CommitteeBits: cb0,
|
||||
Data: data0,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d0_c1_a1 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1111100},
|
||||
CommitteeBits: cb1,
|
||||
Data: data0,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d0_c1_a2 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1001111},
|
||||
CommitteeBits: cb1,
|
||||
Data: data0,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d1_c0_a1 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1111111},
|
||||
CommitteeBits: cb0,
|
||||
Data: data1,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d1_c1_a1 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1000011},
|
||||
CommitteeBits: cb1,
|
||||
Data: data1,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d1_c1_a2 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1100101},
|
||||
CommitteeBits: cb1,
|
||||
Data: data1,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
d1_c1_a3 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1111000},
|
||||
CommitteeBits: cb1,
|
||||
Data: data1,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
|
||||
pool := &mock.PoolMock{}
|
||||
require.NoError(t, pool.SaveAggregatedAttestations([]ethpb.Att{d0_c0_a1, d0_c0_a2, d0_c0_a3, d0_c1_a1, d0_c1_a2, d1_c0_a1, d1_c1_a1, d1_c1_a2, d1_c1_a3}))
|
||||
slot := primitives.Slot(1)
|
||||
s := &Server{AttPool: pool, HeadFetcher: &chainMock.ChainService{}, TimeFetcher: &chainMock.ChainService{Slot: &slot}}
|
||||
|
||||
// We need the correct number of validators so that there are at least 2 committees per slot
|
||||
// and each committee has exactly 6 validators (this is because we have 6 aggregation bits).
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 192)
|
||||
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
|
||||
|
||||
atts, err := s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(atts))
|
||||
assert.Equal(t, true,
|
||||
atts[0].GetAggregationBits().Count() >= atts[1].GetAggregationBits().Count() &&
|
||||
atts[1].GetAggregationBits().Count() >= atts[2].GetAggregationBits().Count() &&
|
||||
atts[2].GetAggregationBits().Count() >= atts[3].GetAggregationBits().Count() &&
|
||||
atts[3].GetAggregationBits().Count() >= atts[4].GetAggregationBits().Count() &&
|
||||
atts[4].GetAggregationBits().Count() >= atts[5].GetAggregationBits().Count(),
|
||||
"on-chain aggregates are not sorted by aggregation bit count",
|
||||
)
|
||||
|
||||
t.Run("slot takes precedence", func(t *testing.T) {
|
||||
moreRecentAtt := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b1100000}, // we set only one bit for committee_index_0
|
||||
CommitteeBits: cb1,
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.PadTo([]byte{'0'}, 32)}),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
require.NoError(t, pool.SaveUnaggregatedAttestations([]ethpb.Att{moreRecentAtt}))
|
||||
atts, err = s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 7, len(atts))
|
||||
assert.Equal(t, true, atts[0].GetData().Slot == 1)
|
||||
})
|
||||
}
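A quick sanity check of the expected count in this test: assuming, as the comments above note, that the single aggregates cannot be merged any further, each attestation data root yields as many on-chain aggregates as the largest number of single aggregates contributed by any one of its committees. The snippet below (hypothetical names, counts taken from the test setup) reproduces the total of 6.

package main

import "fmt"

func main() {
	// Number of single aggregates per committee index, per attestation data root,
	// as constructed in the test above.
	singleAggregates := map[string]map[int]int{
		"data_root_0": {0: 3, 1: 2},
		"data_root_1": {0: 1, 1: 3},
	}

	total := 0
	for root, perCommittee := range singleAggregates {
		maxPerCommittee := 0
		for _, n := range perCommittee {
			if n > maxPerCommittee {
				maxPerCommittee = n
			}
		}
		fmt.Println(root, "->", maxPerCommittee, "on-chain aggregates")
		total += maxPerCommittee
	}
	fmt.Println("total:", total) // 6, matching require.Equal(t, 6, len(atts))
}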
|
||||
|
||||
func Benchmark_packAttestations_Electra(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(b)
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
cfg.ElectraForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
valCount := uint64(1048576)
|
||||
committeeCount := helpers.SlotCommitteeCount(valCount)
|
||||
valsPerCommittee := valCount / committeeCount / uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
st, _ := util.DeterministicGenesisStateElectra(b, valCount)
|
||||
|
||||
key, err := blst.RandKey()
|
||||
require.NoError(b, err)
|
||||
sig := key.Sign([]byte{'X'})
|
||||
|
||||
r := rand.New(rand.NewSource(123))
|
||||
|
||||
var atts []ethpb.Att
|
||||
for c := uint64(0); c < committeeCount; c++ {
|
||||
for a := uint64(0); a < params.BeaconConfig().TargetAggregatorsPerCommittee; a++ {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(c, true)
|
||||
|
||||
var att *ethpb.AttestationElectra
|
||||
// Last two aggregators send aggregates for some random block root with only a few bits set.
|
||||
if a >= params.BeaconConfig().TargetAggregatorsPerCommittee-2 {
|
||||
root := bytesutil.PadTo([]byte("root_"+strconv.Itoa(r.Intn(100))), 32)
|
||||
att = ðpb.AttestationElectra{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch - 1, BeaconBlockRoot: root}),
|
||||
AggregationBits: bitfield.NewBitlist(valsPerCommittee),
|
||||
CommitteeBits: cb,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
for bit := uint64(0); bit < valsPerCommittee; bit++ {
|
||||
att.AggregationBits.SetBitAt(bit, r.Intn(100) < 2) // 2% that the bit is set
|
||||
}
|
||||
} else {
|
||||
att = ðpb.AttestationElectra{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch - 1, BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32)}),
|
||||
AggregationBits: bitfield.NewBitlist(valsPerCommittee),
|
||||
CommitteeBits: cb,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
for bit := uint64(0); bit < valsPerCommittee; bit++ {
|
||||
att.AggregationBits.SetBitAt(bit, r.Intn(100) < 98) // 98% that the bit is set
|
||||
}
|
||||
}
|
||||
|
||||
atts = append(atts, att)
|
||||
}
|
||||
}
|
||||
|
||||
pool := &mock.PoolMock{}
|
||||
require.NoError(b, pool.SaveAggregatedAttestations(atts))
|
||||
|
||||
slot := primitives.Slot(1)
|
||||
s := &Server{AttPool: pool, HeadFetcher: &chainMock.ChainService{}, TimeFetcher: &chainMock.ChainService{Slot: &slot}}
|
||||
|
||||
require.NoError(b, st.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = s.packAttestations(ctx, st, params.BeaconConfig().SlotsPerEpoch+1)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_limitToMaxAttestations(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
atts := make([]ethpb.Att, params.BeaconConfig().MaxAttestations+1)
|
||||
|
||||
@@ -54,7 +54,7 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState state.Be
|
||||
// by ETH1_FOLLOW_DISTANCE. The head state should maintain the same ETH1Data until this condition has passed, so
|
||||
// trust the existing head for the right eth1 vote until we can get a meaningful value from the deposit contract.
|
||||
if latestValidTime < genesisTime+followDistanceSeconds {
|
||||
log.WithField("genesisTime", genesisTime).WithField("latestValidTime", latestValidTime).Warn("voting period before genesis + follow distance, using eth1data from head")
|
||||
log.WithField("genesisTime", genesisTime).WithField("latestValidTime", latestValidTime).Warn("Voting period before genesis + follow distance, using eth1data from head")
|
||||
return vs.HeadFetcher.HeadETH1Data(), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -84,7 +84,6 @@ func (vs *Server) getLocalPayloadFromEngine(
|
||||
}
|
||||
setFeeRecipientIfBurnAddress(&val)
|
||||
|
||||
var err error
|
||||
if ok && payloadId != [8]byte{} {
|
||||
// Payload ID is cache hit. Return the cached payload ID.
|
||||
var pid primitives.PayloadID
|
||||
@@ -102,7 +101,7 @@ func (vs *Server) getLocalPayloadFromEngine(
|
||||
return nil, errors.Wrap(err, "could not get cached payload from execution client")
|
||||
}
|
||||
}
|
||||
log.WithFields(logFields).Debug("payload ID cache miss")
|
||||
log.WithFields(logFields).Debug("Payload ID cache miss")
|
||||
parentHash, err := vs.getParentBlockHash(ctx, st, slot)
|
||||
switch {
|
||||
case errors.Is(err, errActivationNotReached) || errors.Is(err, errNoTerminalBlockHash):
|
||||
@@ -191,7 +190,7 @@ func (vs *Server) getLocalPayloadFromEngine(
|
||||
}
|
||||
|
||||
warnIfFeeRecipientDiffers(val.FeeRecipient[:], res.ExecutionData.FeeRecipient())
|
||||
log.WithField("value", res.Bid).Debug("received execution payload from local engine")
|
||||
log.WithField("value", res.Bid).Debug("Received execution payload from local engine")
|
||||
return res, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -912,7 +912,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
useBuilder: true,
|
||||
err: "unblind sidecars failed: commitment value doesn't match block",
|
||||
err: "unblind blobs sidecars: commitment value doesn't match block",
|
||||
},
|
||||
{
|
||||
name: "electra block no blob",
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
|
||||
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
@@ -57,24 +55,6 @@ import (
|
||||
|
||||
const attestationBufferSize = 100
|
||||
|
||||
var (
|
||||
httpRequestLatency = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "http_request_latency_seconds",
|
||||
Help: "Latency of HTTP requests in seconds",
|
||||
Buckets: []float64{0.001, 0.01, 0.025, 0.1, 0.25, 1, 2.5, 10},
|
||||
},
|
||||
[]string{"endpoint", "code", "method"},
|
||||
)
|
||||
httpRequestCount = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "http_request_count",
|
||||
Help: "Number of HTTP requests",
|
||||
},
|
||||
[]string{"endpoint", "code", "method"},
|
||||
)
|
||||
)
|
||||
|
||||
// Service defining an RPC server for a beacon node.
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
|
||||
@@ -107,7 +107,7 @@ type blobBatchVerifier struct {
|
||||
|
||||
func (bbv *blobBatchVerifier) newVerifier(rb blocks.ROBlob) verification.BlobVerifier {
|
||||
m := bbv.verifiers[rb.BlockRoot()]
|
||||
m[rb.Index] = bbv.newBlobVerifier(rb, verification.BackfillSidecarRequirements)
|
||||
m[rb.Index] = bbv.newBlobVerifier(rb, verification.BackfillBlobSidecarRequirements)
|
||||
bbv.verifiers[rb.BlockRoot()] = m
|
||||
return m[rb.Index]
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/checkpoint",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/beacon:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -4,11 +4,14 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
const stateSizeLimit int64 = 1 << 29 // 512MB
|
||||
|
||||
// APIInitializer manages initializing the beacon node using checkpoint sync, retrieving the checkpoint state and root
|
||||
// from the remote beacon node api.
|
||||
type APIInitializer struct {
|
||||
@@ -18,7 +21,7 @@ type APIInitializer struct {
|
||||
// NewAPIInitializer creates an APIInitializer, handling the set up of a beacon node api client
|
||||
// using the provided host string.
|
||||
func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
|
||||
c, err := beacon.NewClient(beaconNodeHost)
|
||||
c, err := beacon.NewClient(beaconNodeHost, client.WithMaxBodySize(stateSizeLimit))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse beacon node url or hostname - %s", beaconNodeHost)
|
||||
}
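For reference, a minimal usage sketch of the initializer above, assuming the package is imported as checkpoint (per its Bazel importpath); the beacon node URL is a placeholder and error handling is abbreviated.

package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/checkpoint"
)

func main() {
	// NewAPIInitializer now builds its beacon API client with
	// client.WithMaxBodySize(stateSizeLimit), capping checkpoint state
	// downloads at 512MB. The URL below is a placeholder.
	ci, err := checkpoint.NewAPIInitializer("http://localhost:3500")
	if err != nil {
		log.Fatalf("could not create checkpoint initializer: %v", err)
	}
	_ = ci // ci.Initialize(ctx, beaconDB) would then download the finalized origin state and block.
}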
|
||||
@@ -32,10 +35,9 @@ func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
|
||||
if err == nil && origin != params.BeaconConfig().ZeroHash {
|
||||
log.Warnf("Origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
|
||||
return nil
|
||||
} else {
|
||||
if !errors.Is(err, db.ErrNotFound) {
|
||||
return errors.Wrap(err, "error while checking database for origin root")
|
||||
}
|
||||
}
|
||||
if err != nil && !errors.Is(err, db.ErrNotFound) {
|
||||
return errors.Wrap(err, "error while checking database for origin root")
|
||||
}
|
||||
od, err := beacon.DownloadFinalizedData(ctx, dl.c)
|
||||
if err != nil {
|
||||
|
||||
@@ -388,6 +388,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// oneEpoch returns the duration of one epoch.
|
||||
func oneEpoch() time.Duration {
|
||||
return time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
|
||||
}
|
||||
|
||||
@@ -1094,7 +1094,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
|
||||
// its claims with actual blocks.
|
||||
emptyPeer := connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers())
|
||||
defer func() {
|
||||
p2p.Peers().SetConnectionState(emptyPeer, peers.PeerDisconnected)
|
||||
p2p.Peers().SetConnectionState(emptyPeer, peers.Disconnected)
|
||||
}()
|
||||
chainState, err := p2p.Peers().ChainState(emptyPeer)
|
||||
require.NoError(t, err)
|
||||
@@ -1291,7 +1291,7 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
|
||||
// Connect peer that has all the blocks available.
|
||||
allBlocksPeer := connectPeerHavingBlocks(t, p2p, chain, finalizedSlot, p2p.Peers())
|
||||
defer func() {
|
||||
p2p.Peers().SetConnectionState(allBlocksPeer, peers.PeerDisconnected)
|
||||
p2p.Peers().SetConnectionState(allBlocksPeer, peers.Disconnected)
|
||||
}()
|
||||
|
||||
// Queue should be able to fetch whole chain (including slot which comes before the currently set head).
|
||||
|
||||
@@ -227,7 +227,7 @@ func connectPeer(t *testing.T, host *p2pt.TestP2P, datum *peerData, peerStatus *
|
||||
p.Connect(host)
|
||||
|
||||
peerStatus.Add(new(enr.Record), p.PeerID(), nil, network.DirOutbound)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.Connected)
|
||||
peerStatus.SetChainState(p.PeerID(), ðpb.Status{
|
||||
ForkDigest: params.BeaconConfig().GenesisForkVersion,
|
||||
FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)),
|
||||
@@ -326,7 +326,7 @@ func connectPeerHavingBlocks(
|
||||
require.NoError(t, err)
|
||||
|
||||
peerStatus.Add(new(enr.Record), p.PeerID(), nil, network.DirOutbound)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.PeerConnected)
|
||||
peerStatus.SetConnectionState(p.PeerID(), peers.Connected)
|
||||
peerStatus.SetChainState(p.PeerID(), ðpb.Status{
|
||||
ForkDigest: params.BeaconConfig().GenesisForkVersion,
|
||||
FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
|
||||
|
||||
@@ -172,7 +172,7 @@ func (s *Service) processFetchedDataRegSync(
|
||||
if len(bwb) == 0 {
|
||||
return
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
batchFields := logrus.Fields{
|
||||
"firstSlot": data.bwb[0].Block.Block().Slot(),
|
||||
@@ -331,7 +331,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
|
||||
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
|
||||
}
|
||||
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
s.logBatchSyncStatus(genesis, first, len(bwb))
|
||||
for _, bb := range bwb {
|
||||
|
||||
@@ -340,7 +340,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
if len(sidecars) != len(req) {
|
||||
continue
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
current := s.clock.CurrentSlot()
|
||||
if err := avs.Persist(current, sidecars...); err != nil {
|
||||
|
||||
@@ -495,8 +495,8 @@ func TestOriginOutsideRetention(t *testing.T) {
|
||||
bdb := dbtest.SetupDB(t)
|
||||
genesis := time.Unix(0, 0)
|
||||
secsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
retentionSeconds := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch)
|
||||
outsideRetention := genesis.Add(retentionSeconds)
|
||||
retentionPeriod := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch)
|
||||
outsideRetention := genesis.Add(retentionPeriod)
|
||||
now := func() time.Time {
|
||||
return outsideRetention
|
||||
}
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
p1.Connect(p2)
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{})
|
||||
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
|
||||
@@ -315,7 +315,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
||||
if uint64(len(roots)) > maxReqBlock {
|
||||
req = roots[:maxReqBlock]
|
||||
}
|
||||
if err := s.sendRecentBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
||||
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Debug("Could not send recent block request")
|
||||
}
|
||||
|
||||
@@ -406,7 +406,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
r.initCaches()
|
||||
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{})
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
@@ -505,7 +505,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
r.initCaches()
|
||||
|
||||
p1.Peers().Add(new(enr.Record), p1.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p1.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p1.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p1.PeerID(), ðpb.Status{})
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
@@ -611,7 +611,7 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
r.initCaches()
|
||||
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{FinalizedEpoch: 2})
|
||||
|
||||
b0 := util.NewBeaconBlock()
|
||||
|
||||
@@ -7,12 +7,13 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
)
|
||||
|
||||
const defaultBurstLimit = 5
|
||||
@@ -98,19 +99,20 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
|
||||
defer l.RUnlock()
|
||||
|
||||
topic := string(stream.Protocol())
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
|
||||
collector, err := l.retrieveCollector(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := stream.Conn().RemotePeer().String()
|
||||
remaining := collector.Remaining(key)
|
||||
|
||||
remaining := collector.Remaining(remotePeer.String())
|
||||
// Treat each request as a minimum of 1.
|
||||
if amt == 0 {
|
||||
amt = 1
|
||||
}
|
||||
if amt > uint64(remaining) {
|
||||
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
|
||||
writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
|
||||
return p2ptypes.ErrRateLimited
|
||||
}
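In plain terms, the quota check above treats every request as at least one unit and rejects anything above what is left in the peer's bucket, descoring the peer and returning ErrRateLimited. Below is a simplified stand-in, not the real leakybucket collector; the real limiter tracks consumption separately via rateLimiter.add, which is folded in here for brevity.

package main

import (
	"errors"
	"fmt"
)

// peerQuota is a toy stand-in for the per-peer leaky-bucket collector used above.
type peerQuota struct {
	remaining uint64
}

var errRateLimited = errors.New("rate limited")

// validate mirrors the check in validateRequest: a zero-sized request still
// costs one unit, and anything above the remaining capacity is rejected.
func (q *peerQuota) validate(amt uint64) error {
	if amt == 0 {
		amt = 1
	}
	if amt > q.remaining {
		return errRateLimited
	}
	q.remaining -= amt
	return nil
}

func main() {
	q := &peerQuota{remaining: 2}
	fmt.Println(q.validate(1)) // <nil>
	fmt.Println(q.validate(0)) // <nil> (counts as 1)
	fmt.Println(q.validate(1)) // rate limited
}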
|
||||
|
||||
@@ -97,7 +97,7 @@ func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
|
||||
for i := 0; i < defaultBurstLimit; i++ {
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream))
|
||||
}
|
||||
assert.Equal(t, true, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
require.NoError(t, stream.Close(), "could not close stream")
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
|
||||
@@ -51,6 +51,7 @@ func (s *Service) registerRPCHandlers() {
|
||||
s.pingHandler,
|
||||
)
|
||||
s.registerRPCHandlersAltair()
|
||||
|
||||
if currEpoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
s.registerRPCHandlersDeneb()
|
||||
}
|
||||
@@ -138,6 +139,9 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, ttfbTimeout)
|
||||
defer cancel()
|
||||
|
||||
conn := stream.Conn()
|
||||
remotePeer := conn.RemotePeer()
|
||||
|
||||
// Resetting after closing is a no-op so defer a reset in case something goes wrong.
|
||||
// It's up to the handler to Close the stream (send an EOF) if
|
||||
// it successfully writes a response. We don't blindly call
|
||||
@@ -157,12 +161,12 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.rpc")
|
||||
defer span.End()
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
span.SetAttributes(trace.StringAttribute("peer", stream.Conn().RemotePeer().String()))
|
||||
span.SetAttributes(trace.StringAttribute("peer", remotePeer.String()))
|
||||
log := log.WithField("peer", stream.Conn().RemotePeer().String()).WithField("topic", string(stream.Protocol()))
|
||||
|
||||
// Check before hand that peer is valid.
|
||||
if s.cfg.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
|
||||
if err := s.cfg.p2p.Peers().IsBad(remotePeer); err != nil {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, remotePeer); err != nil {
|
||||
log.WithError(err).Debug("Could not disconnect from peer")
|
||||
}
|
||||
return
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// beaconBlocksByRangeRPCHandler looks up the request blocks from the database from a given start block.
|
||||
@@ -26,15 +27,23 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
defer cancel()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
|
||||
m, ok := msg.(*pb.BeaconBlocksByRangeRequest)
|
||||
if !ok {
|
||||
return errors.New("message is not type *pb.BeaconBlockByRangeRequest")
|
||||
}
|
||||
log.WithField("startSlot", m.StartSlot).WithField("count", m.Count).Debug("Serving block by range request")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": m.StartSlot,
|
||||
"count": m.Count,
|
||||
"peer": remotePeer,
|
||||
}).Debug("Serving block by range request")
|
||||
|
||||
rp, err := validateRangeRequest(m, s.cfg.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
@@ -50,12 +59,12 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
remainingBucketCapacity := blockLimiter.Remaining(stream.Conn().RemotePeer().String())
|
||||
remainingBucketCapacity := blockLimiter.Remaining(remotePeer.String())
|
||||
span.SetAttributes(
|
||||
trace.Int64Attribute("start", int64(rp.start)), // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
trace.Int64Attribute("end", int64(rp.end)), // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
trace.Int64Attribute("count", int64(m.Count)),
|
||||
trace.StringAttribute("peer", stream.Conn().RemotePeer().String()),
|
||||
trace.StringAttribute("peer", remotePeer.String()),
|
||||
trace.Int64Attribute("remaining_capacity", remainingBucketCapacity),
|
||||
)
|
||||
|
||||
@@ -82,12 +91,19 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
|
||||
}
|
||||
|
||||
if err := batch.error(); err != nil {
|
||||
log.WithError(err).Debug("error in BlocksByRange batch")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
log.WithError(err).Debug("Serving block by range request - BlocksByRange batch")
|
||||
|
||||
// If a rate limit is hit, it means an error response has already been sent and the stream has been closed.
|
||||
if !errors.Is(err, p2ptypes.ErrRateLimited) {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
}
|
||||
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
closeStream(stream, log)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,9 +20,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// sendRecentBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
|
||||
// sendBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
|
||||
// those corresponding blocks from that peer.
|
||||
func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
|
||||
func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||
defer cancel()
|
||||
|
||||
@@ -151,7 +151,7 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
|
||||
if len(sidecars) != len(request) {
|
||||
return fmt.Errorf("received %d blob sidecars, expected %d for RPC", len(sidecars), len(request))
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueSidecarRequirements)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueBlobSidecarRequirements)
|
||||
for _, sidecar := range sidecars {
|
||||
if err := verify.BlobAlignsWithBlock(sidecar, RoBlock); err != nil {
|
||||
return err
|
||||
|
||||
@@ -253,7 +253,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
require.NoError(t, r.sendRecentBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
|
||||
require.NoError(t, r.sendBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -328,7 +328,7 @@ func TestRecentBeaconBlocks_RPCRequestSent_IncorrectRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
require.ErrorContains(t, "received unexpected block with root", r.sendRecentBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
|
||||
require.ErrorContains(t, "received unexpected block with root", r.sendBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID()))
|
||||
}
|
||||
|
||||
func TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks(t *testing.T) {
|
||||
@@ -395,7 +395,7 @@ func TestRequestPendingBlobs(t *testing.T) {
|
||||
Genesis: time.Now(),
|
||||
}
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{FinalizedEpoch: 1})
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
|
||||
@@ -99,6 +99,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
|
||||
var batch blockBatch
|
||||
|
||||
wQuota := params.BeaconConfig().MaxRequestBlobSidecars
|
||||
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
|
||||
batchStart := time.Now()
|
||||
@@ -114,7 +115,12 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
if err := batch.error(); err != nil {
|
||||
log.WithError(err).Debug("error in BlobSidecarsByRange batch")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
|
||||
// If a rate limit is hit, it means an error response has already been sent and the stream has been closed.
|
||||
if !errors.Is(err, p2ptypes.ErrRateLimited) {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
}
|
||||
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -55,10 +55,7 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
|
||||
|
||||
// disconnectBadPeer checks whether peer is considered bad by some scorer, and tries to disconnect
|
||||
// the peer, if that is the case. Additionally, disconnection reason is obtained from scorer.
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if !s.cfg.p2p.Peers().IsBad(id) {
|
||||
return
|
||||
}
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID, badPeerErr error) {
|
||||
err := s.cfg.p2p.Peers().Scorers().ValidationError(id)
|
||||
goodbyeCode := p2ptypes.ErrToGoodbyeCode(err)
|
||||
if err == nil {
|
||||
@@ -67,6 +64,8 @@ func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
|
||||
log.WithError(err).Debug("Error when disconnecting with bad peer")
|
||||
}
|
||||
|
||||
log.WithError(badPeerErr).WithField("peerID", id).Debug("Initiate peer disconnection")
|
||||
}
|
||||
|
||||
// A custom goodbye method that is used by our connection handler, in the
|
||||
|
||||
@@ -2,12 +2,12 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -16,127 +16,191 @@ import (
|
||||
)
|
||||
|
||||
// pingHandler reads the incoming ping rpc message from the peer.
|
||||
// If the peer's sequence number is higher than the one stored locally,
|
||||
// a METADATA request is sent to the peer to retrieve and update the latest metadata.
|
||||
// Note: This function is misnamed, as it performs more than just reading a ping message.
|
||||
func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
SetRPCStreamDeadlines(stream)
|
||||
|
||||
// Convert the message to the SSZUint64 type.
|
||||
m, ok := msg.(*primitives.SSZUint64)
|
||||
if !ok {
|
||||
return fmt.Errorf("wrong message type for ping, got %T, wanted *uint64", msg)
|
||||
}
|
||||
|
||||
// Validate the incoming request regarding rate limiting.
|
||||
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "validate request")
|
||||
}
|
||||
|
||||
s.rateLimiter.add(stream, 1)
|
||||
valid, err := s.validateSequenceNum(*m, stream.Conn().RemotePeer())
|
||||
|
||||
// Retrieve the peer ID.
|
||||
peerID := stream.Conn().RemotePeer()
|
||||
|
||||
// Check if the peer sequence number is higher than the one we have in our store.
|
||||
valid, err := s.validateSequenceNum(*m, peerID)
|
||||
if err != nil {
|
||||
// Descore peer for giving us a bad sequence number.
|
||||
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidSequenceNum.Error(), stream)
|
||||
}
|
||||
return err
|
||||
|
||||
return errors.Wrap(err, "validate sequence number")
|
||||
}
|
||||
|
||||
// We can already prepare a success response to the peer.
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "write response")
|
||||
}
|
||||
sq := primitives.SSZUint64(s.cfg.p2p.MetadataSeq())
|
||||
if _, err := s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, &sq); err != nil {
|
||||
|
||||
// Retrieve our own sequence number.
|
||||
seqNumber := s.cfg.p2p.MetadataSeq()
|
||||
|
||||
// SSZ encode our sequence number.
|
||||
seqNumberSSZ := primitives.SSZUint64(seqNumber)
|
||||
|
||||
// Send our sequence number back to the peer.
|
||||
if _, err := s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, &seqNumberSSZ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
closeStream(stream, log)
|
||||
|
||||
if valid {
|
||||
// If the sequence number was valid we're done.
|
||||
// If the peer's sequence numberwas valid we're done.
|
||||
return nil
|
||||
}
|
||||
|
||||
// The sequence number was not valid. Start our own ping back to the peer.
|
||||
// The peer's sequence number was not valid. We ask the peer for its metadata.
|
||||
go func() {
|
||||
// New context so the calling function doesn't cancel on us.
|
||||
// Define a new context so the calling function doesn't cancel on us.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), ttfbTimeout)
|
||||
defer cancel()
|
||||
md, err := s.sendMetaDataRequest(ctx, stream.Conn().RemotePeer())
|
||||
|
||||
// Send a METADATA request to the peer.
|
||||
peerMetadata, err := s.sendMetaDataRequest(ctx, peerID)
|
||||
if err != nil {
|
||||
// We cannot compare errors directly as the stream muxer error
|
||||
// type isn't compatible with the error we have, so a direct
|
||||
// equality checks fails.
|
||||
if !strings.Contains(err.Error(), p2ptypes.ErrIODeadline.Error()) {
|
||||
log.WithField("peer", stream.Conn().RemotePeer()).WithError(err).Debug("Could not send metadata request")
|
||||
log.WithField("peer", peerID).WithError(err).Debug("Could not send metadata request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
// update metadata if there is no error
|
||||
s.cfg.p2p.Peers().SetMetadata(stream.Conn().RemotePeer(), md)
|
||||
|
||||
// Update peer's metadata.
|
||||
s.cfg.p2p.Peers().SetMetadata(peerID, peerMetadata)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
// sendPingRequest first sends a PING request to the peer.
// If the peer responds with a sequence number higher than the latest one we have in our store for it,
// then this function sends a METADATA request to the peer, and stores the metadata received.
// This function is actually poorly named, since it does more than just send a ping request.
func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
    ctx, cancel := context.WithTimeout(ctx, respTimeout)
    defer cancel()

    metadataSeq := primitives.SSZUint64(s.cfg.p2p.MetadataSeq())
    topic, err := p2p.TopicFromMessage(p2p.PingMessageName, slots.ToEpoch(s.cfg.clock.CurrentSlot()))
    // Get the current epoch.
    currentSlot := s.cfg.clock.CurrentSlot()
    currentEpoch := slots.ToEpoch(currentSlot)

    // SSZ encode our metadata sequence number.
    metadataSeq := s.cfg.p2p.MetadataSeq()
    encodedMetadataSeq := primitives.SSZUint64(metadataSeq)

    // Get the PING topic for the current epoch.
    topic, err := p2p.TopicFromMessage(p2p.PingMessageName, currentEpoch)
    if err != nil {
        return err
        return errors.Wrap(err, "topic from message")
    }
    stream, err := s.cfg.p2p.Send(ctx, &metadataSeq, topic, id)

    // Send the PING request to the peer.
    stream, err := s.cfg.p2p.Send(ctx, &encodedMetadataSeq, topic, peerID)
    if err != nil {
        return err
        return errors.Wrap(err, "send ping request")
    }
    currentTime := time.Now()
    defer closeStream(stream, log)

    startTime := time.Now()

    // Read the response from the peer.
    code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
    if err != nil {
        return err
        return errors.Wrap(err, "read status code")
    }
    // Records the latency of the ping request for that peer.
    s.cfg.p2p.Host().Peerstore().RecordLatency(id, time.Now().Sub(currentTime))

    // Record the latency of the ping request for that peer.
    s.cfg.p2p.Host().Peerstore().RecordLatency(peerID, time.Now().Sub(startTime))

    // If the peer responded with an error, increment the bad responses scorer.
    if code != 0 {
        s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
        return errors.New(errMsg)
        s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
        return errors.Errorf("code: %d - %s", code, errMsg)
    }

    // Decode the sequence number from the peer.
    msg := new(primitives.SSZUint64)
    if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
        return err
        return errors.Wrap(err, "decode sequence number")
    }
    valid, err := s.validateSequenceNum(*msg, stream.Conn().RemotePeer())

    // Determine if the sequence number returned by the peer is higher than the one we have in our store.
    valid, err := s.validateSequenceNum(*msg, peerID)
    if err != nil {
        // Descore peer for giving us a bad sequence number.
        if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
            s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
            s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
        }
        return err

        return errors.Wrap(err, "validate sequence number")
    }

    // The sequence number we have in our store for this peer is the same as the one returned by the peer, all good.
    if valid {
        return nil
    }
    md, err := s.sendMetaDataRequest(ctx, stream.Conn().RemotePeer())

    // We need to send a METADATA request to the peer to get its latest metadata.
    md, err := s.sendMetaDataRequest(ctx, peerID)
    if err != nil {
        // do not increment bad responses, as it's
        // already done in the request method.
        return err
        // do not increment bad responses, as it's already done in the request method.
        return errors.Wrap(err, "send metadata request")
    }
    s.cfg.p2p.Peers().SetMetadata(stream.Conn().RemotePeer(), md)

    // Update the metadata for the peer.
    s.cfg.p2p.Peers().SetMetadata(peerID, md)

    return nil
}
// validates the peer's sequence number.
// validateSequenceNum validates the peer's sequence number.
// - If the peer's sequence number is greater than the sequence number we have in our store for the peer, return false.
// - If the peer's sequence number is equal to the sequence number we have in our store for the peer, return true.
// - If the peer's sequence number is less than the sequence number we have in our store for the peer, return an error.
func (s *Service) validateSequenceNum(seq primitives.SSZUint64, id peer.ID) (bool, error) {
    // Retrieve the metadata for the peer we have in our store.
    md, err := s.cfg.p2p.Peers().Metadata(id)
    if err != nil {
        return false, err
        return false, errors.Wrap(err, "get metadata")
    }

    // If we have no metadata for the peer, return false.
    if md == nil || md.IsNil() {
        return false, nil
    }
    // Return error on invalid sequence number.

    // The peer's sequence number must be greater than or equal to the sequence number we have in our store.
    if md.SequenceNumber() > uint64(seq) {
        return false, p2ptypes.ErrInvalidSequenceNum
    }

    // Return true if the peer's sequence number is equal to the sequence number we have in our store.
    return md.SequenceNumber() == uint64(seq), nil
}
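To make the three cases above concrete, here is a small, self-contained sketch of the same comparison; checkSeq and errInvalidSequenceNum are illustrative stand-ins for the method and p2ptypes.ErrInvalidSequenceNum:

package main

import (
    "errors"
    "fmt"
)

// errInvalidSequenceNum stands in for p2ptypes.ErrInvalidSequenceNum in this sketch.
var errInvalidSequenceNum = errors.New("invalid sequence number provided by peer")

// checkSeq mirrors the decision logic above: storedSeq is the sequence number we
// have for the peer, peerSeq is the one the peer just sent.
func checkSeq(storedSeq, peerSeq uint64) (bool, error) {
    if storedSeq > peerSeq {
        // The peer's sequence number went backwards: invalid.
        return false, errInvalidSequenceNum
    }
    // true  -> our stored metadata is current.
    // false -> the peer advertises newer metadata, so a METADATA request should follow.
    return storedSeq == peerSeq, nil
}

func main() {
    for _, peerSeq := range []uint64{7, 5, 3} {
        valid, err := checkSeq(5, peerSeq)
        fmt.Printf("stored=5 peer=%d -> valid=%v err=%v\n", peerSeq, valid, err)
    }
}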
@@ -19,7 +19,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
@@ -49,7 +48,7 @@ type BeaconBlockProcessor func(block interfaces.ReadOnlySignedBeaconBlock) error
// SendBeaconBlocksByRangeRequest sends BeaconBlocksByRange and returns fetched blocks, if any.
func SendBeaconBlocksByRangeRequest(
    ctx context.Context, tor blockchain.TemporalOracle, p2pProvider p2p.SenderEncoder, pid peer.ID,
    req *pb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
    req *ethpb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
    topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, slots.ToEpoch(tor.CurrentSlot()))
    if err != nil {
@@ -155,7 +154,7 @@ func SendBeaconBlocksByRootRequest(
    return blocks, nil
}

func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *pb.BlobSidecarsByRangeRequest, bvs ...BlobResponseValidation) ([]blocks.ROBlob, error) {
func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *ethpb.BlobSidecarsByRangeRequest, bvs ...BlobResponseValidation) ([]blocks.ROBlob, error) {
    topic, err := p2p.TopicFromMessage(p2p.BlobSidecarsByRangeName, slots.ToEpoch(tor.CurrentSlot()))
    if err != nil {
        return nil, err
@@ -298,7 +297,7 @@ func blobValidatorFromRootReq(req *p2ptypes.BlobSidecarsByRootReq) BlobResponseV
    }
}

func blobValidatorFromRangeReq(req *pb.BlobSidecarsByRangeRequest) BlobResponseValidation {
func blobValidatorFromRangeReq(req *ethpb.BlobSidecarsByRangeRequest) BlobResponseValidation {
    end := req.StartSlot + primitives.Slot(req.Count)
    return func(sc blocks.ROBlob) error {
        if sc.Slot() < req.StartSlot || sc.Slot() >= end {

@@ -25,7 +25,7 @@ import (
    "github.com/sirupsen/logrus"
)

// maintainPeerStatuses by infrequently polling peers for their latest status.
// maintainPeerStatuses maintains peer statuses by polling peers for their latest status twice per epoch.
func (s *Service) maintainPeerStatuses() {
    // Run twice per epoch.
    interval := time.Duration(params.BeaconConfig().SlotsPerEpoch.Div(2).Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
@@ -38,16 +38,20 @@ func (s *Service) maintainPeerStatuses() {
        // If our peer status has not been updated correctly we disconnect over here
        // and set the connection state over here instead.
        if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected {
            s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnecting)
            s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnecting)
            if err := s.cfg.p2p.Disconnect(id); err != nil {
                log.WithError(err).Debug("Error when disconnecting with peer")
            }
            s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
            s.cfg.p2p.Peers().SetConnectionState(id, peers.Disconnected)
            log.WithFields(logrus.Fields{
                "peer": id,
                "reason": "maintain peer statuses - peer is not connected",
            }).Debug("Initiate peer disconnection")
            return
        }
        // Disconnect from peers that are considered bad by any of the registered scorers.
        if s.cfg.p2p.Peers().IsBad(id) {
            s.disconnectBadPeer(s.ctx, id)
        if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
            s.disconnectBadPeer(s.ctx, id, err)
            return
        }
        // If the status hasn't been updated in the recent interval time.
@@ -73,6 +77,11 @@ func (s *Service) maintainPeerStatuses() {
            if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeTooManyPeers, id); err != nil {
                log.WithField("peer", id).WithError(err).Debug("Could not disconnect with peer")
            }

            log.WithFields(logrus.Fields{
                "peer": id,
                "reason": "to be pruned",
            }).Debug("Initiate peer disconnection")
        }
    })
}
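For a sense of scale, the polling interval computed at the top of maintainPeerStatuses works out to half an epoch. A quick sketch assuming mainnet parameters (32 slots per epoch, 12 seconds per slot; the real code reads these from params.BeaconConfig()):

package main

import (
    "fmt"
    "time"
)

func main() {
    const (
        slotsPerEpoch  = 32
        secondsPerSlot = 12
    )
    // Run twice per epoch: half an epoch's worth of slots, expressed in seconds.
    interval := time.Duration(slotsPerEpoch/2*secondsPerSlot) * time.Second
    fmt.Println(interval) // 3m12s
}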
@@ -169,8 +178,8 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
    // If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
    err = s.validateStatusMessage(ctx, msg)
    s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
    if s.cfg.p2p.Peers().IsBad(id) {
        s.disconnectBadPeer(s.ctx, id)
    if err := s.cfg.p2p.Peers().IsBad(id); err != nil {
        s.disconnectBadPeer(s.ctx, id, err)
    }
    return err
}
@@ -182,7 +191,7 @@ func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
    }
    // Do not return an error for ping requests.
    if err := s.sendPingRequest(ctx, id); err != nil && !isUnwantedError(err) {
        log.WithError(err).Debug("Could not ping peer")
        log.WithError(err).WithField("pid", id).Debug("Could not ping peer")
    }
    return nil
}

@@ -413,7 +413,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
    assert.Equal(t, numActive1+1, numActive2, "Number of active peers unexpected")

    require.NoError(t, p2.Disconnect(p1.PeerID()))
    p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerDisconnected)
    p1.Peers().SetConnectionState(p2.PeerID(), peers.Disconnected)

    // Wait for disconnect event to trigger.
    time.Sleep(200 * time.Millisecond)
@@ -877,7 +877,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

    require.NoError(t, cw.SetClock(startup.NewClock(chain.Genesis, chain.ValidatorsRoot)))

    assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
    assert.NoError(t, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
    p1.Connect(p2)

    if util.WaitTimeout(&wg, time.Second) {
@@ -887,9 +887,9 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

    connectionState, err := p1.Peers().ConnectionState(p2.PeerID())
    require.NoError(t, err, "Could not obtain peer connection state")
    assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")
    assert.Equal(t, peers.Disconnected, connectionState, "Expected peer to be disconnected")

    assert.Equal(t, true, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
    assert.NotNil(t, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
}

func TestStatusRPC_ValidGenesisMessage(t *testing.T) {

@@ -15,6 +15,8 @@ import (
    "github.com/libp2p/go-libp2p/core/peer"
    gcache "github.com/patrickmn/go-cache"
    "github.com/pkg/errors"
    "github.com/trailofbits/go-mutexasserts"

    "github.com/prysmaticlabs/prysm/v5/async"
    "github.com/prysmaticlabs/prysm/v5/async/abool"
    "github.com/prysmaticlabs/prysm/v5/async/event"
@@ -44,22 +46,24 @@ import (
    "github.com/prysmaticlabs/prysm/v5/runtime"
    prysmTime "github.com/prysmaticlabs/prysm/v5/time"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/trailofbits/go-mutexasserts"
)

var _ runtime.Service = (*Service)(nil)

const rangeLimit uint64 = 1024
const seenBlockSize = 1000
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
const seenUnaggregatedAttSize = 20000
const seenAggregatedAttSize = 16384
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
const seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
const seenExitSize = 100
const seenProposerSlashingSize = 100
const badBlockSize = 1000
const syncMetricsInterval = 10 * time.Second
const (
    rangeLimit uint64 = 1024
    seenBlockSize = 1000
    seenBlobSize = seenBlockSize * 6 // Each block can have max 6 blobs.
    seenDataColumnSize = seenBlockSize * 128 // Each block can have max 128 data columns.
    seenUnaggregatedAttSize = 20000
    seenAggregatedAttSize = 16384
    seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
    seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
    seenExitSize = 100
    seenProposerSlashingSize = 100
    badBlockSize = 1000
    syncMetricsInterval = 10 * time.Second
)

var (
    // Seconds in one epoch.
@@ -162,18 +166,18 @@ type Service struct {

// NewService initializes new regular sync service.
func NewService(ctx context.Context, opts ...Option) *Service {
    c := gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */)
    ctx, cancel := context.WithCancel(ctx)
    r := &Service{
        ctx: ctx,
        cancel: cancel,
        chainStarted: abool.New(),
        cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
        slotToPendingBlocks: c,
        slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
        seenPendingBlocks: make(map[[32]byte]bool),
        blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
        signatureChan: make(chan *signatureVerifier, verifierLimit),
    }

    for _, opt := range opts {
        if err := opt(r); err != nil {
            return nil
@@ -224,7 +228,7 @@ func (s *Service) Start() {
    s.newBlobVerifier = newBlobVerifierFromInitializer(v)

    go s.verifierRoutine()
    go s.registerHandlers()
    go s.startTasksPostInitialSync()

    s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye)
    s.cfg.p2p.AddDisconnectionHandler(func(_ context.Context, _ peer.ID) error {
@@ -315,23 +319,31 @@ func (s *Service) waitForChainStart() {
    s.markForChainStart()
}

func (s *Service) registerHandlers() {
func (s *Service) startTasksPostInitialSync() {
    // Wait for the chain to start.
    s.waitForChainStart()

    select {
    case <-s.initialSyncComplete:
        // Register respective pubsub handlers at state synced event.
        digest, err := s.currentForkDigest()
        // Compute the current epoch.
        currentSlot := slots.CurrentSlot(uint64(s.cfg.clock.GenesisTime().Unix()))
        currentEpoch := slots.ToEpoch(currentSlot)

        // Compute the current fork digest.
        forkDigest, err := s.currentForkDigest()
        if err != nil {
            log.WithError(err).Error("Could not retrieve current fork digest")
            return
        }
        currentEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.cfg.clock.GenesisTime().Unix())))
        s.registerSubscribers(currentEpoch, digest)

        // Register respective pubsub handlers at state synced event.
        s.registerSubscribers(currentEpoch, forkDigest)

        // Start the fork watcher.
        go s.forkWatcher()
        return

    case <-s.ctx.Done():
        log.Debug("Context closed, exiting goroutine")
        return
    }
}
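The renamed startTasksPostInitialSync boils down to a standard Go wait pattern: block on either the initial-sync-complete channel or the service context, whichever fires first. A stripped-down, self-contained sketch of that pattern (channel and function names are illustrative):

package main

import (
    "context"
    "fmt"
    "time"
)

// startTasks waits for initial sync to complete, then starts the post-sync tasks,
// unless the service context is cancelled first.
func startTasks(ctx context.Context, initialSyncComplete <-chan struct{}) {
    select {
    case <-initialSyncComplete:
        fmt.Println("initial sync complete: registering subscribers and starting the fork watcher")
    case <-ctx.Done():
        fmt.Println("Context closed, exiting goroutine")
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    initialSyncComplete := make(chan struct{})
    go func() {
        time.Sleep(10 * time.Millisecond) // simulate initial sync finishing
        close(initialSyncComplete)
    }()

    startTasks(ctx, initialSyncComplete)
}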
@@ -62,7 +62,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
    }

    topic := "/eth2/%x/beacon_block"
    go r.registerHandlers()
    go r.startTasksPostInitialSync()
    time.Sleep(100 * time.Millisecond)

    var vr [32]byte
@@ -143,7 +143,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {

    syncCompleteCh := make(chan bool)
    go func() {
        r.registerHandlers()
        r.startTasksPostInitialSync()
        syncCompleteCh <- true
    }()

@@ -200,7 +200,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
        initialSyncComplete: make(chan struct{}),
    }

    go r.registerHandlers()
    go r.startTasksPostInitialSync()
    var vr [32]byte
    require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
    r.waitForChainStart()

@@ -13,7 +13,7 @@ import (
func (s *Service) blobSubscriber(ctx context.Context, msg proto.Message) error {
    b, ok := msg.(blocks.VerifiedROBlob)
    if !ok {
        return fmt.Errorf("message was not type blocks.ROBlob, type=%T", msg)
        return fmt.Errorf("message was not type blocks.VerifiedROBlob, type=%T", msg)
    }

    return s.subscribeBlob(ctx, b)

@@ -117,6 +117,9 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
    if seen {
        return pubsub.ValidationIgnore, nil
    }

    // Verify the block being voted on is in the beacon chain.
    // If not, store this attestation in the map of pending attestations.
    if !s.validateBlockInAttestation(ctx, m) {
        return pubsub.ValidationIgnore, nil
    }
@@ -222,6 +225,8 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
    return s.validateWithBatchVerifier(ctx, "aggregate", set)
}

// validateBlockInAttestation checks if the block being voted on is in the beaconDB.
// If not, it stores this attestation in the map of pending attestations.
func (s *Service) validateBlockInAttestation(ctx context.Context, satt ethpb.SignedAggregateAttAndProof) bool {
    // Verify the block being voted and the processed state is in beaconDB. The block should have passed validation if it's in the beaconDB.
    blockRoot := bytesutil.ToBytes32(satt.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot)
@@ -51,7 +51,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
    if err != nil {
        return pubsub.ValidationReject, errors.Wrap(err, "roblob conversion failure")
    }
    vf := s.newBlobVerifier(blob, verification.GossipSidecarRequirements)
    vf := s.newBlobVerifier(blob, verification.GossipBlobSidecarRequirements)

    if err := vf.BlobIndexInBounds(); err != nil {
        return pubsub.ValidationReject, err

@@ -10,6 +10,7 @@ go_library(
        "fake.go",
        "initializer.go",
        "interface.go",
        "log.go",
        "metrics.go",
        "mock.go",
        "result.go",

@@ -169,7 +169,7 @@ func TestBatchVerifier(t *testing.T) {
        blk, blbs := c.bandb(t, c.nblobs)
        reqs := c.reqs
        if reqs == nil {
            reqs = InitsyncSidecarRequirements
            reqs = InitsyncBlobSidecarRequirements
        }
        bbv := NewBlobBatchVerifier(c.nv(), reqs)
        if c.cv == nil {

@@ -2,6 +2,7 @@ package verification

import (
    "context"
    goError "errors"

    "github.com/pkg/errors"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
@@ -12,7 +13,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/runtime/logging"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    log "github.com/sirupsen/logrus"
)

const (
@@ -29,7 +29,7 @@ const (
    RequireSidecarProposerExpected
)

var allSidecarRequirements = []Requirement{
var allBlobSidecarRequirements = []Requirement{
    RequireBlobIndexInBounds,
    RequireNotFromFutureSlot,
    RequireSlotAboveFinalized,
@@ -43,21 +43,21 @@ var allSidecarRequirements = []Requirement{
    RequireSidecarProposerExpected,
}

// GossipSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
// GossipBlobSidecarRequirements defines the set of requirements that BlobSidecars received on gossip
// must satisfy in order to upgrade an ROBlob to a VerifiedROBlob.
var GossipSidecarRequirements = requirementList(allSidecarRequirements).excluding()
var GossipBlobSidecarRequirements = requirementList(allBlobSidecarRequirements).excluding()

// SpectestSidecarRequirements is used by the forkchoice spectests when verifying blobs used in the on_block tests.
// SpectestBlobSidecarRequirements is used by the forkchoice spectests when verifying blobs used in the on_block tests.
// The only requirements we exclude for these tests are the parent validity and seen tests, as these are specific to
// gossip processing and require the bad block cache that we only use there.
var SpectestSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
var SpectestBlobSidecarRequirements = requirementList(GossipBlobSidecarRequirements).excluding(
    RequireSidecarParentSeen, RequireSidecarParentValid)

// InitsyncSidecarRequirements is the list of verification requirements to be used by the init-sync service
// InitsyncBlobSidecarRequirements is the list of verification requirements to be used by the init-sync service
// for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method
// for blobs after the block has been verified, and the blobs to be verified are keyed in the cache by the
// block root, the list of required verifications is much shorter than gossip.
var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).excluding(
var InitsyncBlobSidecarRequirements = requirementList(GossipBlobSidecarRequirements).excluding(
    RequireNotFromFutureSlot,
    RequireSlotAboveFinalized,
    RequireSidecarParentSeen,
@@ -67,40 +67,22 @@ var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).exc
    RequireSidecarProposerExpected,
)

// ELMemPoolRequirements is a list of verification requirements to be used when importing blobs and proof from
// execution layer mempool. Only the KZG proof verification is required.
var ELMemPoolRequirements = []Requirement{RequireSidecarKzgProofVerified}
// ELMemPoolRequirements defines the verification requirements for importing blobs and proofs
// from the execution layer's mempool. Currently, no requirements are enforced because it is
// assumed that blobs and proofs from the execution layer are correctly formatted,
// given the trusted relationship between the consensus layer and execution layer.
var ELMemPoolRequirements []Requirement

// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
// BackfillBlobSidecarRequirements is the same as InitsyncBlobSidecarRequirements.
var BackfillBlobSidecarRequirements = requirementList(InitsyncBlobSidecarRequirements).excluding()

// PendingQueueSidecarRequirements is the same as InitsyncSidecarRequirements, used by the pending blocks queue.
var PendingQueueSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
// PendingQueueBlobSidecarRequirements is the same as InitsyncBlobSidecarRequirements, used by the pending blocks queue.
var PendingQueueBlobSidecarRequirements = requirementList(InitsyncBlobSidecarRequirements).excluding()
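All of the requirement lists above are derived from one another through requirementList(...).excluding(...). A rough, self-contained illustration of that pattern follows; this is not the Prysm implementation, and the Requirement values are stand-ins:

package main

import "fmt"

type Requirement int

const (
    RequireBlobIndexInBounds Requirement = iota
    RequireNotFromFutureSlot
    RequireSlotAboveFinalized
)

type requirementList []Requirement

// excluding returns a copy of the list without the given requirements.
func (rl requirementList) excluding(minus ...Requirement) []Requirement {
    skip := make(map[Requirement]bool, len(minus))
    for _, r := range minus {
        skip[r] = true
    }
    out := make([]Requirement, 0, len(rl))
    for _, r := range rl {
        if !skip[r] {
            out = append(out, r)
        }
    }
    return out
}

func main() {
    all := requirementList{RequireBlobIndexInBounds, RequireNotFromFutureSlot, RequireSlotAboveFinalized}
    fmt.Println(all.excluding(RequireNotFromFutureSlot)) // [0 2]
}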
var (
    ErrBlobInvalid = errors.New("blob failed verification")
    // ErrBlobIndexInvalid means RequireBlobIndexInBounds failed.
    ErrBlobIndexInvalid = errors.Wrap(ErrBlobInvalid, "incorrect blob sidecar index")
    // ErrFromFutureSlot means RequireSlotNotTooEarly failed.
    ErrFromFutureSlot = errors.Wrap(ErrBlobInvalid, "slot is too far in the future")
    // ErrSlotNotAfterFinalized means RequireSlotAboveFinalized failed.
    ErrSlotNotAfterFinalized = errors.Wrap(ErrBlobInvalid, "slot <= finalized checkpoint")
    // ErrInvalidProposerSignature means RequireValidProposerSignature failed.
    ErrInvalidProposerSignature = errors.Wrap(ErrBlobInvalid, "proposer signature could not be verified")
    // ErrSidecarParentNotSeen means RequireSidecarParentSeen failed.
    ErrSidecarParentNotSeen = errors.Wrap(ErrBlobInvalid, "parent root has not been seen")
    // ErrSidecarParentInvalid means RequireSidecarParentValid failed.
    ErrSidecarParentInvalid = errors.Wrap(ErrBlobInvalid, "parent block is not valid")
    // ErrSlotNotAfterParent means RequireSidecarParentSlotLower failed.
    ErrSlotNotAfterParent = errors.Wrap(ErrBlobInvalid, "slot <= slot")
    // ErrSidecarNotFinalizedDescendent means RequireSidecarDescendsFromFinalized failed.
    ErrSidecarNotFinalizedDescendent = errors.Wrap(ErrBlobInvalid, "blob parent is not descended from the finalized block")
    // ErrSidecarInclusionProofInvalid means RequireSidecarInclusionProven failed.
    ErrSidecarInclusionProofInvalid = errors.Wrap(ErrBlobInvalid, "sidecar inclusion proof verification failed")
    // ErrSidecarKzgProofInvalid means RequireSidecarKzgProofVerified failed.
    ErrSidecarKzgProofInvalid = errors.Wrap(ErrBlobInvalid, "sidecar kzg commitment proof verification failed")
    // ErrSidecarUnexpectedProposer means RequireSidecarProposerExpected failed.
    ErrSidecarUnexpectedProposer = errors.Wrap(ErrBlobInvalid, "sidecar was not proposed by the expected proposer_index")
    ErrBlobIndexInvalid = errors.New("incorrect blob sidecar index")
)

type ROBlobVerifier struct {
@@ -149,7 +131,7 @@ func (bv *ROBlobVerifier) BlobIndexInBounds() (err error) {
    defer bv.recordResult(RequireBlobIndexInBounds, &err)
    if bv.blob.Index >= fieldparams.MaxBlobsPerBlock {
        log.WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar index >= MAX_BLOBS_PER_BLOCK")
        return ErrBlobIndexInvalid
        return blobErrBuilder(ErrBlobIndexInvalid)
    }
    return nil
}
@@ -168,7 +150,7 @@ func (bv *ROBlobVerifier) NotFromFutureSlot() (err error) {
    // If the system time is still before earliestStart, we consider the blob from a future slot and return an error.
    if bv.clock.Now().Before(earliestStart) {
        log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is too far in the future")
        return ErrFromFutureSlot
        return blobErrBuilder(ErrFromFutureSlot)
    }
    return nil
}
@@ -181,11 +163,11 @@ func (bv *ROBlobVerifier) SlotAboveFinalized() (err error) {
    fcp := bv.fc.FinalizedCheckpoint()
    fSlot, err := slots.EpochStart(fcp.Epoch)
    if err != nil {
        return errors.Wrapf(ErrSlotNotAfterFinalized, "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
        return errors.Wrapf(blobErrBuilder(ErrSlotNotAfterFinalized), "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
    }
    if bv.blob.Slot() <= fSlot {
        log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is not after finalized checkpoint")
        return ErrSlotNotAfterFinalized
        return blobErrBuilder(ErrSlotNotAfterFinalized)
    }
    return nil
}
@@ -203,7 +185,7 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
    if err != nil {
        log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("reusing failed proposer signature validation from cache")
        blobVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
        return ErrInvalidProposerSignature
        return blobErrBuilder(ErrInvalidProposerSignature)
    }
    return nil
}
@@ -213,12 +195,12 @@ func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error
    parent, err := bv.parentState(ctx)
    if err != nil {
        log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("could not replay parent state for blob signature verification")
        return ErrInvalidProposerSignature
        return blobErrBuilder(ErrInvalidProposerSignature)
    }
    // Full verification, which will subsequently be cached for anything sharing the signature cache.
    if err = bv.sc.VerifySignature(sd, parent); err != nil {
        log.WithFields(logging.BlobFields(bv.blob)).WithError(err).Debug("signature verification failed")
        return ErrInvalidProposerSignature
        return blobErrBuilder(ErrInvalidProposerSignature)
    }
    return nil
}
@@ -235,7 +217,7 @@ func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err
        return nil
    }
    log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root has not been seen")
    return ErrSidecarParentNotSeen
    return blobErrBuilder(ErrSidecarParentNotSeen)
}

// SidecarParentValid represents the spec verification:
@@ -244,7 +226,7 @@ func (bv *ROBlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err
    defer bv.recordResult(RequireSidecarParentValid, &err)
    if badParent != nil && badParent(bv.blob.ParentRoot()) {
        log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root is invalid")
        return ErrSidecarParentInvalid
        return blobErrBuilder(ErrSidecarParentInvalid)
    }
    return nil
}
@@ -255,10 +237,10 @@ func (bv *ROBlobVerifier) SidecarParentSlotLower() (err error) {
    defer bv.recordResult(RequireSidecarParentSlotLower, &err)
    parentSlot, err := bv.fc.Slot(bv.blob.ParentRoot())
    if err != nil {
        return errors.Wrap(ErrSlotNotAfterParent, "parent root not in forkchoice")
        return errors.Wrap(blobErrBuilder(ErrSlotNotAfterParent), "parent root not in forkchoice")
    }
    if parentSlot >= bv.blob.Slot() {
        return ErrSlotNotAfterParent
        return blobErrBuilder(ErrSlotNotAfterParent)
    }
    return nil
}
@@ -270,7 +252,7 @@ func (bv *ROBlobVerifier) SidecarDescendsFromFinalized() (err error) {
    defer bv.recordResult(RequireSidecarDescendsFromFinalized, &err)
    if !bv.fc.HasNode(bv.blob.ParentRoot()) {
        log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root not in forkchoice")
        return ErrSidecarNotFinalizedDescendent
        return blobErrBuilder(ErrSidecarNotFinalizedDescendent)
    }
    return nil
}
@@ -281,7 +263,7 @@ func (bv *ROBlobVerifier) SidecarInclusionProven() (err error) {
    defer bv.recordResult(RequireSidecarInclusionProven, &err)
    if err = blocks.VerifyKZGInclusionProof(bv.blob); err != nil {
        log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("sidecar inclusion proof verification failed")
        return ErrSidecarInclusionProofInvalid
        return blobErrBuilder(ErrSidecarInclusionProofInvalid)
    }
    return nil
}
@@ -293,7 +275,7 @@ func (bv *ROBlobVerifier) SidecarKzgProofVerified() (err error) {
    defer bv.recordResult(RequireSidecarKzgProofVerified, &err)
    if err = bv.verifyBlobCommitment(bv.blob); err != nil {
        log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("kzg commitment proof verification failed")
        return ErrSidecarKzgProofInvalid
        return blobErrBuilder(ErrSidecarKzgProofInvalid)
    }
    return nil
}
@@ -311,7 +293,7 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
    }
    r, err := bv.fc.TargetRootForEpoch(bv.blob.ParentRoot(), e)
    if err != nil {
        return ErrSidecarUnexpectedProposer
        return blobErrBuilder(ErrSidecarUnexpectedProposer)
    }
    c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e}
    idx, cached := bv.pc.Proposer(c, bv.blob.Slot())
@@ -319,19 +301,19 @@ func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err erro
        pst, err := bv.parentState(ctx)
        if err != nil {
            log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("state replay to parent_root failed")
            return ErrSidecarUnexpectedProposer
            return blobErrBuilder(ErrSidecarUnexpectedProposer)
        }
        idx, err = bv.pc.ComputeProposer(ctx, bv.blob.ParentRoot(), bv.blob.Slot(), pst)
        if err != nil {
            log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("error computing proposer index from parent state")
            return ErrSidecarUnexpectedProposer
            return blobErrBuilder(ErrSidecarUnexpectedProposer)
        }
    }
    if idx != bv.blob.ProposerIndex() {
        log.WithError(ErrSidecarUnexpectedProposer).
        log.WithError(blobErrBuilder(ErrSidecarUnexpectedProposer)).
            WithFields(logging.BlobFields(bv.blob)).WithField("expectedProposer", idx).
            Debug("unexpected blob proposer")
        return ErrSidecarUnexpectedProposer
        return blobErrBuilder(ErrSidecarUnexpectedProposer)
    }
    return nil
}
@@ -357,3 +339,7 @@ func blobToSignatureData(b blocks.ROBlob) SignatureData {
        Slot: b.Slot(),
    }
}

func blobErrBuilder(baseErr error) error {
    return goError.Join(ErrBlobInvalid, baseErr)
}
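The switch from errors.Wrap-ed sentinels to blobErrBuilder with the standard library's errors.Join means callers can still match both the broad category and the specific failure with errors.Is. A self-contained sketch of that behavior, using the two sentinel values mirrored from the definitions above:

package main

import (
    "errors"
    "fmt"
)

var (
    ErrBlobInvalid      = errors.New("blob failed verification")
    ErrBlobIndexInvalid = errors.New("incorrect blob sidecar index")
)

// blobErrBuilder joins the generic verification error with a specific failure.
func blobErrBuilder(baseErr error) error {
    return errors.Join(ErrBlobInvalid, baseErr)
}

func main() {
    err := blobErrBuilder(ErrBlobIndexInvalid)
    fmt.Println(errors.Is(err, ErrBlobInvalid))      // true
    fmt.Println(errors.Is(err, ErrBlobIndexInvalid)) // true
}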
Some files were not shown because too many files have changed in this diff.