Compare commits


4 Commits

Author   SHA1         Message                        Date
rkapka   788c6914f7   remove Proto() function        2024-11-01 16:46:06 +07:00
rkapka   201297bf46   move interface back            2024-11-01 15:44:05 +07:00
rkapka   4ee7066bbe   protos                         2024-11-01 15:03:31 +07:00
rkapka   1a894bb98d   Use read only validator more   2024-11-01 14:38:41 +07:00
343 changed files with 3468 additions and 10388 deletions

View File

@@ -54,7 +54,7 @@ jobs:
- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
with:
version: v1.56.1
version: v1.55.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:

View File

@@ -73,7 +73,6 @@ linters:
- promlinter
- protogetter
- revive
- spancheck
- staticcheck
- stylecheck
- tagalign

View File

@@ -8,29 +8,17 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
### Added
- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430).
- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430)
- Add Bellatrix tests for light client functions.
- Add Discovery Rebooter Feature.
- Added GetBlockAttestationsV2 endpoint.
- Light client support: Consensus types for Electra.
- Light client support: Consensus types for Electra
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to roll back node's internal state during processing.
- Change how unsafe protobuf state is created to prevent unnecessary copies.
- Added benchmarks for process slots for Capella, Deneb, Electra.
- Add helper to cast bytes to string without allocating memory (see the sketch after this diff).
- Added GetAggregatedAttestationV2 endpoint.
- Added SubmitAttestationsV2 endpoint.
- Validator REST mode Electra block support.
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Add logs when a peer is (dis)connected. Add the reason of the disconnection when we initiate it.
- Added a Prometheus error counter metric for HTTP requests to track beacon node requests.
- Added a Prometheus error counter metric for SSE requests.
- Simplified `EjectedValidatorIndices`.
### Changed
@@ -45,31 +33,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
- Use read only validator for core processing to avoid unnecessary copying.
- Use ROBlock across block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- When instantiating new validators, explicitly set `Slashed` to false and move `EffectiveBalance` to match the struct definition.
- Updated PGO profile for beacon chain with Holesky data. This improves the profile-guided optimizations in the Go compiler.
- Use read only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`, `engine_getPayloadV4` are changed due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only Build the Protobuf state once during serialization.
- Capella blocks are execution blocks.
- Fixed panic when http request to subscribe to event stream fails.
- Return early for blob reconstructor during capella fork.
- Updated block endpoint from V1 to V2.
- Rename instances of "deposit receipts" to "deposit requests".
- Non-blocking payload attribute event handling in beacon api [pr](https://github.com/prysmaticlabs/prysm/pull/14644).
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
- Improvements to HTTP response handling. [pr](https://github.com/prysmaticlabs/prysm/pull/14673)
- Updated `Blobs` endpoint to return additional metadata fields.
- Refactor static and dynamic subnets subscription.
### Deprecated
@@ -78,9 +44,6 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
### Removed
- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed kzg proof check from blob reconstructor.
### Fixed
@@ -94,15 +57,6 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
- Fix keymanager API to return the correct error format for malformed tokens
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvements, removing some redundant or duplicate logs
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- Unskip Electra Merkle spec test
- Fix panic in validator REST mode when checking status after removing all keys
- Fix panic on attestation interface since we call data before validation
- Corrects nil check on some interface attestation types
- Temporary solution for handling Electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.
- P2P: Avoid infinite loop when looking for peers in small networks.
### Security
@@ -216,7 +170,6 @@ Updating to this release is recommended at your convenience.
- Light client support: fix light client attested header execution fields' wrong version bug.
- Testing: added custom matcher for better push settings testing.
- Registered `GetDepositSnapshot` Beacon API endpoint.
- Fix rolling back of a block due to a context deadline.
### Security
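
One Added entry above mentions a helper to cast bytes to string without allocating memory. A minimal sketch of how such a helper is commonly written in Go 1.20+, assuming the unsafe-based approach; the package and function name are hypothetical and the actual Prysm helper may differ:

```go
package bytesutil // hypothetical package name

import "unsafe"

// UnsafeCastToString reinterprets b as a string without copying.
// Callers must never mutate b afterwards: the runtime assumes the
// backing memory of a string is immutable.
func UnsafeCastToString(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(unsafe.SliceData(b), len(b))
}
```

The usual trade-off applies: the cast saves an allocation on hot paths, but ownership of the byte slice becomes part of the caller's contract.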

View File

@@ -12,7 +12,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",

View File

@@ -14,7 +14,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -177,7 +176,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
err = non200Err(r)
return
}
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
res, err = io.ReadAll(r.Body)
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return
@@ -359,7 +358,7 @@ func (c *Client) Status(ctx context.Context) error {
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
bodyBytes, err := io.ReadAll(response.Body)
var errMessage ErrorMessage
var body string
if err != nil {

View File

@@ -10,17 +10,11 @@ import (
"github.com/pkg/errors"
)
const (
MaxBodySize int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
MaxErrBodySize int64 = 1 << 17 // 128KB
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
maxBodySize int64
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
@@ -32,9 +26,8 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
maxBodySize: MaxBodySize,
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
@@ -79,7 +72,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
@@ -96,7 +89,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
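
These client hunks toggle between a capped read and a plain io.ReadAll. A minimal sketch of the io.LimitReader technique, with MaxBodySize mirroring the constant in the diff; the surrounding function is illustrative:

```go
package client

import (
	"io"
	"net/http"

	"github.com/pkg/errors"
)

const MaxBodySize int64 = 1 << 23 // 8MB cap, mirroring the diff

// readCapped drains at most MaxBodySize bytes of the response body, so a
// misbehaving server cannot force the client to buffer an unbounded body.
func readCapped(r *http.Response) ([]byte, error) {
	defer func() { _ = r.Body.Close() }()
	b, err := io.ReadAll(io.LimitReader(r.Body, MaxBodySize))
	if err != nil {
		return nil, errors.Wrap(err, "error reading http response body")
	}
	return b, nil
}
```

One subtlety: LimitReader truncates silently at the cap, so callers that must detect an over-long body can read MaxBodySize+1 bytes and check the resulting length.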

View File

@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(r *http.Response) error {
b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(b)
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
switch r.StatusCode {
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case http.StatusNotFound:
return errors.Wrap(ErrNotFound, msg)
default:
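
Non200Err wraps the sentinel ErrNotFound so callers can branch on the error kind while keeping the formatted message. A minimal sketch of that wrap-and-check pattern; everything except ErrNotFound and errors.Wrap is illustrative:

```go
package client

import (
	"fmt"
	"net/http"

	"github.com/pkg/errors"
)

var ErrNotFound = errors.New("recognized 404 from API")

func non200(r *http.Response) error {
	msg := fmt.Sprintf("code=%d, url=%s", r.StatusCode, r.Request.URL)
	switch r.StatusCode {
	case http.StatusNotFound:
		// Wrap keeps ErrNotFound in the chain, so callers can still match
		// it with errors.Is after the message is augmented.
		return errors.Wrap(ErrNotFound, msg)
	default:
		return errors.New(msg)
	}
}
```

A caller can then write `if errors.Is(err, ErrNotFound) { ... }` to treat 404s specially.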

View File

@@ -93,7 +93,6 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
return
}
defer func() {

View File

@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {
func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 3; i++ {
@@ -79,23 +79,3 @@ func TestEventStream(t *testing.T) {
}
}
}
func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// use valid url that will result in failed request with nil body
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
require.NoError(t, err)
// error will happen when request is made, should be received over events channel
go stream.Subscribe(eventsChannel)
event := <-eventsChannel
if event.EventType != EventConnectionError {
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
}
}
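
The tests above drive an SSE endpoint built by hand on top of httptest, using http.Flusher to push events immediately. A minimal self-contained sketch of that technique; the topic name and payloads are illustrative:

```go
package events_test

import (
	"bufio"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestSSESketch(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		flusher, ok := w.(http.Flusher)
		if !ok {
			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "text/event-stream")
		for i := 1; i <= 3; i++ {
			// Each SSE event ends with a blank line; Flush sends it now
			// instead of waiting for the handler to return.
			fmt.Fprintf(w, "event: head\ndata: %d\n\n", i)
			flusher.Flush()
		}
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		t.Log(sc.Text()) // stream ends when the handler returns
	}
}
```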

View File

@@ -46,10 +46,3 @@ func WithAuthenticationToken(token string) ClientOpt {
c.token = token
}
}
// WithMaxBodySize overrides the default max body size of 8MB.
func WithMaxBodySize(size int64) ClientOpt {
return func(c *Client) {
c.maxBodySize = size
}
}
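
WithAuthenticationToken and WithMaxBodySize are functional options: closures that mutate the client while it is being constructed. A minimal sketch of the pattern using the fields from the diff; the defaults are illustrative:

```go
package client

import "net/http"

type Client struct {
	hc          *http.Client
	token       string
	maxBodySize int64
}

// ClientOpt mutates a Client while NewClient assembles it.
type ClientOpt func(*Client)

// WithMaxBodySize overrides the default cap on response bodies.
func WithMaxBodySize(size int64) ClientOpt {
	return func(c *Client) { c.maxBodySize = size }
}

func NewClient(opts ...ClientOpt) *Client {
	// Set defaults first; options run in order, so later options win.
	c := &Client{hc: &http.Client{}, maxBodySize: 1 << 23}
	for _, o := range opts {
		o(c)
	}
	return c
}
```

Call sites stay readable as the option list grows, e.g. `NewClient(WithMaxBodySize(1 << 20))`.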

View File

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/container/slice"
@@ -34,6 +35,20 @@ func ValidatorFromConsensus(v *eth.Validator) *Validator {
}
}
func ValidatorFromConsensusReadOnly(v interfaces.ReadOnlyValidator) *Validator {
pk := v.PublicKey()
return &Validator{
Pubkey: hexutil.Encode(pk[:]),
WithdrawalCredentials: hexutil.Encode(v.GetWithdrawalCredentials()),
EffectiveBalance: fmt.Sprintf("%d", v.EffectiveBalance()),
Slashed: v.Slashed(),
ActivationEligibilityEpoch: fmt.Sprintf("%d", v.ActivationEligibilityEpoch()),
ActivationEpoch: fmt.Sprintf("%d", v.ActivationEpoch()),
ExitEpoch: fmt.Sprintf("%d", v.ExitEpoch()),
WithdrawableEpoch: fmt.Sprintf("%d", v.WithdrawableEpoch()),
}
}
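
ValidatorFromConsensusReadOnly needs only getters, which is exactly what the read-only abstraction provides. A minimal sketch of the interface it consumes, with the method set inferred from the calls above; the real definition lives in consensus-types/interfaces and may differ:

```go
package interfaces

import "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

// ReadOnlyValidator exposes validator fields without permitting mutation,
// so callers can share the underlying state instead of taking defensive copies.
type ReadOnlyValidator interface {
	PublicKey() [48]byte
	GetWithdrawalCredentials() []byte
	EffectiveBalance() uint64
	Slashed() bool
	ActivationEligibilityEpoch() primitives.Epoch
	ActivationEpoch() primitives.Epoch
	ExitEpoch() primitives.Epoch
	WithdrawableEpoch() primitives.Epoch
}
```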
func PendingAttestationFromConsensus(a *eth.PendingAttestation) *PendingAttestation {
return &PendingAttestation{
AggregationBits: hexutil.Encode(a.AggregationBits),

View File

@@ -35,10 +35,10 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
srcVals := st.ValidatorsReadOnly()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
vals[i] = ValidatorFromConsensusReadOnly(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
@@ -121,10 +121,10 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
srcVals := st.ValidatorsReadOnly()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
vals[i] = ValidatorFromConsensusReadOnly(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
@@ -226,10 +226,10 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
srcVals := st.ValidatorsReadOnly()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
vals[i] = ValidatorFromConsensusReadOnly(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
@@ -344,10 +344,10 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
srcVals := st.ValidatorsReadOnly()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
vals[i] = ValidatorFromConsensusReadOnly(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
@@ -481,10 +481,10 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
srcVals := st.ValidatorsReadOnly()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
vals[i] = ValidatorFromConsensusReadOnly(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
@@ -618,10 +618,10 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
srcVals := st.ValidatorsReadOnly()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
vals[i] = ValidatorFromConsensusReadOnly(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))

View File

@@ -26,7 +26,7 @@ type ListAttestationsResponse struct {
}
type SubmitAttestationsRequest struct {
Data json.RawMessage `json:"data"`
Data []*Attestation `json:"data"`
}
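
Switching Data to json.RawMessage defers decoding until the fork version is known, instead of committing to one attestation shape at unmarshal time. A minimal sketch of that technique; the stub types and version string are illustrative:

```go
package structs

import "encoding/json"

type Attestation struct{}        // stub, fields elided
type AttestationElectra struct{} // stub, fields elided

type SubmitAttestationsRequest struct {
	Data json.RawMessage `json:"data"`
}

// decodeByVersion picks the concrete type after inspecting the consensus
// version, e.g. taken from an Eth-Consensus-Version header.
func decodeByVersion(req SubmitAttestationsRequest, version string) (any, error) {
	if version == "electra" {
		var atts []*AttestationElectra
		if err := json.Unmarshal(req.Data, &atts); err != nil {
			return nil, err
		}
		return atts, nil
	}
	var atts []*Attestation
	if err := json.Unmarshal(req.Data, &atts); err != nil {
		return nil, err
	}
	return atts, nil
}
```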
type ListVoluntaryExitsResponse struct {

View File

@@ -1,10 +1,7 @@
package structs
type SidecarsResponse struct {
Version string `json:"version"`
Data []*Sidecar `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []*Sidecar `json:"data"`
}
type Sidecar struct {

View File

@@ -7,8 +7,7 @@ import (
)
type AggregateAttestationResponse struct {
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
Data *Attestation `json:"data"`
}
type SubmitContributionAndProofsRequest struct {

View File

@@ -6,11 +6,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -72,7 +69,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch {
@@ -171,38 +167,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
return payloadID, nil
}
func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
if err != nil {
log.WithError(err).
WithField("head_root", cfg.headRoot[:]).
Error("Could not get proposer index for PayloadAttributes event")
return
}
evd := payloadattribute.EventData{
ProposerIndex: pidx,
ProposalSlot: cfg.headState.Slot(),
ParentBlockRoot: cfg.headRoot[:],
Attributer: cfg.attributes,
HeadRoot: cfg.headRoot,
HeadState: cfg.headState,
HeadBlock: cfg.headBlock,
}
if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
headPayload, err := cfg.headBlock.Block().Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return
}
evd.ParentBlockHash = headPayload.BlockHash()
evd.ParentBlockNumber = headPayload.BlockNumber()
}
f.Send(&feed.Event{
Type: statefeed.PayloadAttributes,
Data: evd,
})
}
// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {

View File

@@ -301,7 +301,7 @@ func (s *Service) headGenesisValidatorsRoot() [32]byte {
// This returns the validator referenced by the provided index in
// the head state.
// This is a lock free version.
func (s *Service) headValidatorAtIndex(index primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
func (s *Service) headValidatorAtIndex(index primitives.ValidatorIndex) (interfaces.ReadOnlyValidator, error) {
return s.head.state.ValidatorAtIndexReadOnly(index)
}

View File

@@ -92,12 +92,12 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
{
name: "process nil attestation",
a: nil,
wantedErr: "attestation is nil",
wantedErr: "attestation can't be nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation is nil",
wantedErr: "attestation's data can't be nil",
},
{
name: "process nil field (a.Target) in attestation",

View File

@@ -7,6 +7,8 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -74,8 +76,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
if err != nil {
// Do not use parent context in the event it deadlined
ctx = trace.NewContext(context.Background(), span)
s.rollbackBlock(ctx, cfg.roblock.Root())
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
}
@@ -618,6 +618,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if !s.inRegularSync() {
return
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
@@ -645,13 +648,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: nil,
attributes: attribute,
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
return
}

View File

@@ -1520,9 +1520,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
// not finish importing and it was never imported to forkchoice). Check
@@ -1716,9 +1714,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1968,9 +1964,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that the headroot/state are not in DB and restart the node
@@ -2352,85 +2346,6 @@ func TestRollbackBlock(t *testing.T) {
require.Equal(t, false, hasState)
}
func TestRollbackBlock_ContextDeadline(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)
// Set deadlined context when processing the block
cancCtx, canc := context.WithCancel(context.Background())
canc()
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
parentRoot = roblock.Block().ParentRoot()
cj := &ethpb.Checkpoint{}
cj.Epoch = 1
cj.Root = parentRoot[:]
require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
// Rollback block insertion into db and caches.
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
}
func fakeCommitments(n int) [][]byte {
f := make([][]byte, n)
for i := range f {

View File

@@ -18,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -85,12 +84,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
currentCheckpoints := s.saveCurrentCheckpoints(preState)
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
}
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
if err != nil {
return err
}
@@ -107,6 +101,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
}
args := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
@@ -190,7 +188,8 @@ func (s *Service) updateCheckpoints(
func (s *Service) validateExecutionAndConsensus(
ctx context.Context,
preState state.BeaconState,
block consensusblocks.ROBlock,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
) (state.BeaconState, bool, error) {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
@@ -209,7 +208,7 @@ func (s *Service) validateExecutionAndConsensus(
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
@@ -560,16 +559,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
}
// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if block.Block().Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
return isValidPayload, err
}
}

View File

@@ -187,12 +187,11 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// return Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// effective_balance=effective_balance,
// slashed=False,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=effective_balance,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
@@ -203,11 +202,10 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
return &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
EffectiveBalance: effectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: effectiveBalance,
}
}
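
GetValidatorFromDeposit derives the effective balance by rounding the deposit down to a whole increment and clamping at the maximum, per the spec pseudocode above. A worked sketch of that arithmetic with the mainnet constants inlined; the helper name is illustrative:

```go
package validators

// Mainnet values of EFFECTIVE_BALANCE_INCREMENT and MAX_EFFECTIVE_BALANCE,
// in Gwei; inlined here for the sketch.
const (
	effectiveBalanceIncrement uint64 = 1_000_000_000  // 1 ETH
	maxEffectiveBalance       uint64 = 32_000_000_000 // 32 ETH
)

// effectiveBalanceFromDeposit rounds amount down to a whole increment and
// caps it, so e.g. a 33.5 ETH deposit yields a 32 ETH effective balance.
func effectiveBalanceFromDeposit(amount uint64) uint64 {
	eb := amount - (amount % effectiveBalanceIncrement)
	if eb > maxEffectiveBalance {
		return maxEffectiveBalance
	}
	return eb
}
```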

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
)
@@ -41,7 +42,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
if beaconState.NumValidators() != len(inactivityScores) {
return nil, nil, errors.New("num of validators is different than num of inactivity scores")
}
if err := beaconState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := beaconState.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
// Set validator's balance, inactivity score and slashed/withdrawable status.
v := &precompute.Validator{
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
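
InitializePrecomputeValidators relies on ReadFromEveryValidator, which walks the registry and hands each entry to a callback as a read-only view rather than copying the validator slice. A minimal sketch of the shape of that accessor; the reduced interface and function are illustrative:

```go
package precompute

// roValidator is a reduced, illustrative view of a registry entry.
type roValidator interface {
	EffectiveBalance() uint64
	Slashed() bool
}

// readFromEveryValidator mimics the state accessor: it visits entries in
// index order and stops at the first callback error.
func readFromEveryValidator(vals []roValidator, f func(idx int, val roValidator) error) error {
	for i, v := range vals {
		if err := f(i, v); err != nil {
			return err
		}
	}
	return nil
}
```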

View File

@@ -92,6 +92,7 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",

View File

@@ -448,7 +448,6 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
Target: &ethpb.Checkpoint{
Epoch: primitives.Epoch(i),
},
Source: &ethpb.Checkpoint{},
}
}
@@ -490,7 +489,6 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
Target: &ethpb.Checkpoint{
Root: []byte{},
},
Source: &ethpb.Checkpoint{},
},
Signature: sig.Marshal(),
AggregationBits: list,

View File

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -68,7 +69,7 @@ func ProcessAttesterSlashing(
currentEpoch := slots.ToEpoch(beaconState.Slot())
var err error
var slashedAny bool
var val state.ReadOnlyValidator
var val interfaces.ReadOnlyValidator
for _, validatorIndex := range slashableIndices {
val, err = beaconState.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(validatorIndex))
if err != nil {

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -365,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
val, err := state_native.NewValidator(&ethpb.Validator{})
val, err := validator.NewValidator(&ethpb.Validator{})
_ = err
err = VerifyExitAndSignature(val, s, ve)
_ = err

View File

@@ -10,6 +10,7 @@ import (
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
@@ -107,7 +108,7 @@ func ProcessVoluntaryExits(
// # Initiate exit
// initiate_validator_exit(state, voluntary_exit.validator_index)
func VerifyExitAndSignature(
validator state.ReadOnlyValidator,
validator interfaces.ReadOnlyValidator,
state state.ReadOnlyBeaconState,
signed *ethpb.SignedVoluntaryExit,
) error {
@@ -166,7 +167,7 @@ func VerifyExitAndSignature(
// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
// # Initiate exit
// initiate_validator_exit(state, voluntary_exit.validator_index)
func verifyExitConditions(st state.ReadOnlyBeaconState, validator state.ReadOnlyValidator, exit *ethpb.VoluntaryExit) error {
func verifyExitConditions(st state.ReadOnlyBeaconState, validator interfaces.ReadOnlyValidator, exit *ethpb.VoluntaryExit) error {
currentEpoch := slots.ToEpoch(st.Slot())
// Verify the validator is active.
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {

View File

@@ -12,6 +12,7 @@ import (
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
validator2 "github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -333,7 +334,7 @@ func TestVerifyExitAndSignature(t *testing.T) {
params.BeaconConfig().ShardCommitteePeriod = 0
validator, signedExit, st, err := tt.setup()
require.NoError(t, err)
rvalidator, err := state_native.NewValidator(validator)
rvalidator, err := validator2.NewValidator(validator)
require.NoError(t, err)
err = blocks.VerifyExitAndSignature(
rvalidator,

View File

@@ -61,9 +61,6 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
}
if body.Version() >= version.Capella {
return true, nil
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
@@ -205,24 +202,24 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
payload, err := body.Execution()
if err != nil {
return err
return nil, err
}
if err := verifyBlobCommitmentCount(body); err != nil {
return err
return nil, err
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
return err
return nil, err
}
if err := ValidatePayload(st, payload); err != nil {
return err
return nil, err
}
if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
return err
return nil, err
}
return nil
return st, nil
}
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {

View File

@@ -253,8 +253,7 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
require.NoError(t, err)
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
// #14614
require.Equal(t, true, got)
require.Equal(t, false, got)
}
func Test_IsExecutionEnabled(t *testing.T) {
@@ -588,7 +587,8 @@ func Test_ProcessPayload(t *testing.T) {
ExecutionPayload: tt.payload,
})
require.NoError(t, err)
if err := blocks.ProcessPayload(st, body); err != nil {
st, err := blocks.ProcessPayload(st, body)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
@@ -619,7 +619,8 @@ func Test_ProcessPayloadCapella(t *testing.T) {
ExecutionPayload: payload,
})
require.NoError(t, err)
require.NoError(t, blocks.ProcessPayload(st, body))
_, err = blocks.ProcessPayload(st, body)
require.NoError(t, err)
}
func Test_ProcessPayload_Blinded(t *testing.T) {
@@ -676,7 +677,8 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
ExecutionPayloadHeader: p,
})
require.NoError(t, err)
if err := blocks.ProcessPayload(st, body); err != nil {
st, err := blocks.ProcessPayload(st, body)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)

View File

@@ -73,12 +73,12 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
proposer, err := beaconState.ValidatorAtIndexReadOnly(proposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockSigningRoot(proposerPubKey, sig, domain, rootFunc)
proposerPubKey := proposer.PublicKey()
return signing.VerifyBlockSigningRoot(proposerPubKey[:], sig, domain, rootFunc)
}
// VerifyBlockHeaderSignature verifies the proposer signature of a beacon block header.
@@ -88,12 +88,12 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
proposer, err := beaconState.ValidatorAtIndexReadOnly(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
proposerPubKey := proposer.PublicKey()
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey[:], header.Signature, domain)
}
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
@@ -109,13 +109,13 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(blk.Block().ProposerIndex())
proposer, err := beaconState.ValidatorAtIndexReadOnly(blk.Block().ProposerIndex())
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
proposerPubKey := proposer.PublicKey()
sig := blk.Signature()
return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
return signing.VerifyBlockSigningRoot(proposerPubKey[:], sig[:], domain, func() ([32]byte, error) {
return blkRoot, nil
})
}
@@ -130,12 +130,12 @@ func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
proposer, err := beaconState.ValidatorAtIndexReadOnly(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
proposerPubKey := proposer.PublicKey()
return signing.BlockSignatureBatch(proposerPubKey[:], sig, domain, rootFunc)
}
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
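
With ValidatorAtIndexReadOnly, PublicKey() returns a [48]byte value instead of a []byte field, which is why every call site above slices it with proposerPubKey[:]. A minimal sketch of the distinction; the types are illustrative:

```go
package blocks

import "fmt"

// roValidator hands out the key by value: a [48]byte copy that callers
// cannot use to mutate the underlying state.
type roValidator struct{ pubkey [48]byte }

func (v roValidator) PublicKey() [48]byte { return v.pubkey }

func verifySketch(v roValidator, verify func(pub []byte) error) error {
	pk := v.PublicKey()
	// pk[:] slices the local copy; the state's key bytes stay untouched.
	if err := verify(pk[:]); err != nil {
		return fmt.Errorf("block signature: %w", err)
	}
	return nil
}
```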

View File

@@ -120,36 +120,35 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
//
// expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// assert len(payload.withdrawals) == len(expected_withdrawals)
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
//
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, processedPartialWithdrawalsCount, err := st.ExpectedWithdrawals()
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
@@ -193,7 +192,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
}
if st.Version() >= version.Electra {
if err := st.DequeuePendingPartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
}
}
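
The pseudocode above advances the withdrawal sweep cursor differently depending on whether the payload carried a full set of withdrawals. A worked sketch of that index update with the mainnet constants inlined; the function name is illustrative:

```go
package blocks

// Mainnet values, inlined for the sketch.
const (
	maxWithdrawalsPerPayload         = 16
	maxValidatorsPerWithdrawalsSweep = 16384
)

// nextSweepStart returns the validator index where the next withdrawal
// sweep begins, mirroring the tail of process_withdrawals above.
func nextSweepStart(numWithdrawals int, lastWithdrawnIdx, currentNextIdx, numValidators uint64) uint64 {
	if numWithdrawals == maxWithdrawalsPerPayload {
		// Full payload: resume right after the last withdrawn validator.
		return (lastWithdrawnIdx + 1) % numValidators
	}
	// Partial payload: jump ahead by the maximum sweep length.
	return (currentNextIdx + maxValidatorsPerWithdrawalsSweep) % numValidators
}
```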

View File

@@ -31,6 +31,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -8,9 +8,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
state_native "github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/contracts/deposit"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
@@ -386,14 +386,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
return errors.Wrap(err, "batch signature verification failed")
}
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
// Process each deposit individually
for _, pendingDeposit := range pendingDeposits {
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
if !found {
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
}
validSignature := allSignaturesVerified
// If batch verification failed, check the individual deposit signature
@@ -411,16 +405,9 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
// Add validator to the registry if the signature is valid
if validSignature {
if found {
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
return errors.Wrap(err, "could not increase balance")
}
} else {
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
if err != nil {
return errors.Wrap(err, "failed to add validator to registry")
}
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
if err != nil {
return errors.Wrap(err, "failed to add validator to registry")
}
}
}
@@ -521,12 +508,11 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// validator = Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// effective_balance=Gwei(0),
// slashed=False,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=Gwei(0),
// )
//
// # [Modified in Electra:EIP7251]
@@ -538,12 +524,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
validator := &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
EffectiveBalance: 0,
Slashed: false,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 0,
}
v, err := state_native.NewValidator(validator)
if err != nil {
@@ -573,7 +558,7 @@ func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState,
return beaconState, nil
}
// processDepositRequest processes the specific deposit request
// processDepositRequest processes the specific deposit receipt
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
// # Set deposit request start index
@@ -603,8 +588,8 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: bytesutil.SafeCopyBytes(request.Pubkey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
Amount: request.Amount,
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
Signature: bytesutil.SafeCopyBytes(request.Signature),
Slot: beaconState.Slot(),
}); err != nil {
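
The removed branch above treated a deposit whose pubkey was already seen in the batch as a balance top-up rather than a new registry entry. A minimal sketch of that dedupe-and-top-up flow; all callbacks are illustrative stand-ins for the state accessors:

```go
package electra

// applyPendingDeposit credits an existing validator when the pubkey was
// already seen in this batch, and registers a new one otherwise.
func applyPendingDeposit(
	seen map[[48]byte]struct{},
	increaseBalance func(pubkey [48]byte, amount uint64) error,
	addToRegistry func(pubkey [48]byte, amount uint64) error,
	pubkey [48]byte, amount uint64,
) error {
	if _, found := seen[pubkey]; found {
		// Duplicate pubkey in the batch: top up the balance instead of
		// re-adding, which would create duplicate registry entries.
		return increaseBalance(pubkey, amount)
	}
	seen[pubkey] = struct{}{}
	return addToRegistry(pubkey, amount)
}
```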

View File

@@ -22,40 +22,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
st := stateWithActiveBalanceETH(t, 1000)
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
for i := 0; i < len(deps); i += 1 {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
validators[i].PublicKey = sk.PublicKey().Marshal()
validators[i].WithdrawalCredentials = wc
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
}
require.NoError(t, st.SetPendingDeposits(deps))
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
require.NoError(t, err)
val := st.Validators()
seenPubkeys := make(map[string]struct{})
for i := 0; i < len(val); i += 1 {
if len(val[i].PublicKey) == 0 {
continue
}
_, ok := seenPubkeys[string(val[i].PublicKey)]
if ok {
t.Fatalf("duplicated pubkeys")
} else {
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
}
}
}
func TestProcessPendingDeposits(t *testing.T) {
tests := []struct {
name string
@@ -319,7 +285,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
invalidDep := &eth.PendingDeposit{}
// have a combination of valid and invalid deposits
deps := []*eth.PendingDeposit{validDep, invalidDep}
require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -39,7 +40,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
bals := st.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
validatorFunc := func(idx int, val interfaces.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val == nil {
return nil, fmt.Errorf("validator %d is nil in state", idx)
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -47,7 +48,7 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := st.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
// Collect validators eligible to enter the activation queue.
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))

View File

@@ -29,6 +29,7 @@ var (
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
AttestationsDelta = altair.AttestationsDelta
ProcessSyncAggregate = altair.ProcessSyncAggregate
)
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.

View File

@@ -84,11 +84,11 @@ func ProcessOperations(
}
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit requests")
return nil, errors.Wrap(err, "could not process deposit receipts")
}
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawal requests")
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
}
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, fmt.Errorf("could not process consolidation requests: %w", err)

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -190,7 +191,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
earliestExitEpoch := time.CurrentEpoch(beaconState)
preActivationIndices := make([]primitives.ValidatorIndex, 0)
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
if err = beaconState.ReadFromEveryValidator(func(index int, val interfaces.ReadOnlyValidator) error {
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
earliestExitEpoch = val.ExitEpoch()
}

View File

@@ -19,6 +19,7 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -59,7 +60,7 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := st.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
// Collect validators eligible to enter the activation queue.
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
@@ -192,7 +193,7 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
bals := st.Balances()
changed := false
err = st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
err = st.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
var penalty uint64
@@ -268,7 +269,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, er
bals := st.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
validatorFunc := func(idx int, val interfaces.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val == nil {
return nil, fmt.Errorf("validator %d is nil in state", idx)
}
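The validatorFunc signature above reads a validator through the read-only view and returns a replacement *ethpb.Validator, so the state only applies a write when something changed. The effective-balance hysteresis it implements can be sketched in isolation; the constants below mirror the spec's mainnet shape but are illustrative only:

package main

import "fmt"

const (
	effectiveBalanceIncrement uint64 = 1_000_000_000 // 1 ETH in Gwei
	hysteresisQuotient        uint64 = 4
	hysteresisDownwardMult    uint64 = 1
	hysteresisUpwardMult      uint64 = 5
	maxEffectiveBalance       uint64 = 32_000_000_000
)

// newEffectiveBalance returns the updated effective balance for a raw
// balance, or the old value unchanged while inside the hysteresis band.
func newEffectiveBalance(balance, effective uint64) uint64 {
	hysteresisIncrement := effectiveBalanceIncrement / hysteresisQuotient
	downward := hysteresisIncrement * hysteresisDownwardMult
	upward := hysteresisIncrement * hysteresisUpwardMult
	if balance+downward < effective || effective+upward < balance {
		eb := balance - balance%effectiveBalanceIncrement
		if eb > maxEffectiveBalance {
			eb = maxEffectiveBalance
		}
		return eb
	}
	return effective
}

func main() {
	fmt.Println(newEffectiveBalance(31_200_000_000, 32_000_000_000)) // 31000000000: left the band
	fmt.Println(newEffectiveBalance(31_900_000_000, 32_000_000_000)) // 32000000000: inside the band
}

Only balances that leave the hysteresis band trigger a new effective balance, which keeps the validator registry (and its Merkle tree) from churning on small balance moves.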

View File

@@ -21,6 +21,7 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
)
@@ -27,7 +28,7 @@ func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, erro
currentEpoch := time.CurrentEpoch(s)
prevEpoch := time.PrevEpoch(s)
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
// Was validator withdrawable or slashed
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch()
pVal := &Validator{

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/math"
)
@@ -26,7 +27,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
var hasSlashing bool
// Iterate through the validator list in state, stopping once a validator satisfies the slashing condition for the current epoch.
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
hasSlashing = true
@@ -43,7 +44,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
increment := params.BeaconConfig().EffectiveBalanceIncrement
bals := s.Balances()
validatorFunc := func(idx int, val state.ReadOnlyValidator) error {
validatorFunc := func(idx int, val interfaces.ReadOnlyValidator) error {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
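The penalty arithmetic in this hunk deliberately divides before multiplying so the result lands on an increment boundary, as in the spec's process_slashings. A worked example with illustrative numbers (totalSlashing standing in for the adjusted total slashing balance):

package main

import "fmt"

func main() {
	const increment = 1_000_000_000 // EFFECTIVE_BALANCE_INCREMENT in Gwei
	effectiveBalance := uint64(32_000_000_000)
	totalBalance := uint64(1_000_000_000_000) // 1000 ETH staked in this toy example
	totalSlashing := uint64(96_000_000_000)   // adjusted total slashing balance

	// Divide before multiplying so the penalty stays on an increment
	// boundary and intermediate values cannot overflow quietly.
	penaltyNumerator := effectiveBalance / increment * totalSlashing
	penalty := penaltyNumerator / totalBalance * increment
	fmt.Println(penalty) // 3000000000 Gwei, i.e. 3 ETH
}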

View File

@@ -31,8 +31,6 @@ const (
LightClientFinalityUpdate
// LightClientOptimisticUpdate event
LightClientOptimisticUpdate
// PayloadAttributes events are fired upon a missed slot or new head.
PayloadAttributes
)
// BlockProcessedData is the data sent with BlockProcessed events.

View File

@@ -76,6 +76,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -23,8 +23,11 @@ var (
// Access to these nil fields will result in a runtime panic,
// so it is recommended to run these checks as a first line of defense.
func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation == nil || attestation.IsNil() {
return errors.New("attestation is nil")
if attestation == nil {
return errors.New("attestation can't be nil")
}
if attestation.GetData() == nil {
return errors.New("attestation's data can't be nil")
}
if attestation.GetData().Source == nil {
return errors.New("attestation's source can't be nil")

View File

@@ -260,12 +260,12 @@ func TestValidateNilAttestation(t *testing.T) {
{
name: "nil attestation",
attestation: nil,
errString: "attestation is nil",
errString: "attestation can't be nil",
},
{
name: "nil attestation data",
attestation: &ethpb.Attestation{},
errString: "attestation is nil",
errString: "attestation's data can't be nil",
},
{
name: "nil attestation source",

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
@@ -346,7 +347,7 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
}
indices := make([]primitives.ValidatorIndex, 0, s.NumValidators())
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
indices = append(indices, primitives.ValidatorIndex(idx))
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
mathutil "github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -69,7 +70,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
total := uint64(0)
epoch := slots.ToEpoch(s.Slot())
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
total += val.EffectiveBalance()
}

View File

@@ -32,7 +32,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
}
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
val, err := st.ValidatorAtIndex(valIdx)
val, err := st.ValidatorAtIndexReadOnly(valIdx)
if err != nil {
return false, err
}
@@ -48,7 +48,8 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
}
}()
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
pk := val.PublicKey()
return len(findSubCommitteeIndices(pk[:], committee.Pubkeys)) > 0, nil
}
if err != nil {
return false, err
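Switching to ValidatorAtIndexReadOnly changes PublicKey from a []byte field into a method returning a fixed-size [48]byte array, which is why the hunk stores it in pk before slicing. A small sketch of the Go mechanics involved (stand-in types, not the real API):

package main

import (
	"bytes"
	"fmt"
)

type readOnlyValidator struct{ pubkey [48]byte }

// PublicKey returns the key by value as a fixed-size array, so the caller
// gets a 48-byte copy instead of a slice aliasing state-owned memory.
func (v readOnlyValidator) PublicKey() [48]byte { return v.pubkey }

func findSubCommitteeIndices(pubkey []byte, committee [][]byte) []int {
	var indices []int
	for i, pk := range committee {
		if bytes.Equal(pk, pubkey) {
			indices = append(indices, i)
		}
	}
	return indices
}

func main() {
	var v readOnlyValidator
	v.pubkey[0] = 0xAA
	pk := v.PublicKey()
	// The array must land in a variable before slicing: pk[:] is legal,
	// while v.PublicKey()[:] fails to compile (the result is not addressable).
	fmt.Println(findSubCommitteeIndices(pk[:], [][]byte{pk[:]})) // [0]
}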

View File

@@ -47,12 +47,12 @@ func IsActiveValidator(validator *ethpb.Validator, epoch primitives.Epoch) bool
}
// IsActiveValidatorUsingTrie checks if a read only validator is active.
func IsActiveValidatorUsingTrie(validator state.ReadOnlyValidator, epoch primitives.Epoch) bool {
func IsActiveValidatorUsingTrie(validator interfaces.ReadOnlyValidator, epoch primitives.Epoch) bool {
return checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
}
// IsActiveNonSlashedValidatorUsingTrie checks if a read only validator is active and not slashed
func IsActiveNonSlashedValidatorUsingTrie(validator state.ReadOnlyValidator, epoch primitives.Epoch) bool {
func IsActiveNonSlashedValidatorUsingTrie(validator interfaces.ReadOnlyValidator, epoch primitives.Epoch) bool {
active := checkValidatorActiveStatus(validator.ActivationEpoch(), validator.ExitEpoch(), epoch)
return active && !validator.Slashed()
}
@@ -76,7 +76,7 @@ func IsSlashableValidator(activationEpoch, withdrawableEpoch primitives.Epoch, s
}
// IsSlashableValidatorUsingTrie checks if a read only validator is slashable.
func IsSlashableValidatorUsingTrie(val state.ReadOnlyValidator, epoch primitives.Epoch) bool {
func IsSlashableValidatorUsingTrie(val interfaces.ReadOnlyValidator, epoch primitives.Epoch) bool {
return checkValidatorSlashable(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), epoch)
}
@@ -134,7 +134,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
}()
var indices []primitives.ValidatorIndex
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
indices = append(indices, primitives.ValidatorIndex(idx))
}
@@ -187,7 +187,7 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
}()
count := uint64(0)
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
count++
}
@@ -410,7 +410,7 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
// and validator.effective_balance >= MIN_ACTIVATION_BALANCE # [Modified in Electra:EIP7251]
// )
func IsEligibleForActivationQueue(validator state.ReadOnlyValidator, currentEpoch primitives.Epoch) bool {
func IsEligibleForActivationQueue(validator interfaces.ReadOnlyValidator, currentEpoch primitives.Epoch) bool {
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
return isEligibleForActivationQueueElectra(validator.ActivationEligibilityEpoch(), validator.EffectiveBalance())
}
@@ -471,7 +471,7 @@ func IsEligibleForActivation(state state.ReadOnlyCheckpoint, validator *ethpb.Va
}
// IsEligibleForActivationUsingROVal checks if the validator is eligible for activation using the provided read only validator.
func IsEligibleForActivationUsingROVal(state state.ReadOnlyCheckpoint, validator state.ReadOnlyValidator) bool {
func IsEligibleForActivationUsingROVal(state state.ReadOnlyCheckpoint, validator interfaces.ReadOnlyValidator) bool {
return isEligibleForActivation(validator.ActivationEligibilityEpoch(), validator.ActivationEpoch(), state.FinalizedCheckpointEpoch())
}
@@ -584,7 +584,7 @@ func IsSameWithdrawalCredentials(a, b *ethpb.Validator) bool {
// and validator.withdrawable_epoch <= epoch
// and balance > 0
// )
func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
func IsFullyWithdrawableValidator(val interfaces.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
if val == nil || balance <= 0 {
return false
}
@@ -600,7 +600,7 @@ func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, e
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
// partial withdrawal. This function assumes that the caller has a lock on the state.
// This method conditionally calls the fork appropriate implementation based on the epoch argument.
func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
func IsPartiallyWithdrawableValidator(val interfaces.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
if val == nil {
return false
}
@@ -630,7 +630,7 @@ func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint6
// and has_max_effective_balance
// and has_excess_balance
// )
func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
func isPartiallyWithdrawableValidatorElectra(val interfaces.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
maxEB := ValidatorMaxEffectiveBalance(val)
hasMaxBalance := val.EffectiveBalance() == maxEB
hasExcessBalance := balance > maxEB
@@ -652,7 +652,7 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
// has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
// has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
// return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
func isPartiallyWithdrawableValidatorCapella(val interfaces.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
@@ -670,7 +670,7 @@ func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balanc
// return MAX_EFFECTIVE_BALANCE_ELECTRA
// else:
// return MIN_ACTIVATION_BALANCE
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
func ValidatorMaxEffectiveBalance(val interfaces.ReadOnlyValidator) uint64 {
if HasCompoundingWithdrawalCredential(val) {
return params.BeaconConfig().MaxEffectiveBalanceElectra
}
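ValidatorMaxEffectiveBalance above keys off the compounding withdrawal credential. A compact sketch of the decision, assuming the spec's mainnet constants (0x02 prefix, 32 ETH minimum activation balance, 2048 ETH Electra maximum):

package main

import "fmt"

const (
	compoundingWithdrawalPrefix        = 0x02
	minActivationBalance        uint64 = 32_000_000_000    // Gwei
	maxEffectiveBalanceElectra  uint64 = 2_048_000_000_000 // Gwei
)

// hasCompoundingWithdrawalCredential checks the first byte of the
// withdrawal credentials for the 0x02 compounding prefix.
func hasCompoundingWithdrawalCredential(creds []byte) bool {
	return len(creds) > 0 && creds[0] == compoundingWithdrawalPrefix
}

// validatorMaxEffectiveBalance mirrors the spec: compounding validators may
// hold up to MAX_EFFECTIVE_BALANCE_ELECTRA, all others MIN_ACTIVATION_BALANCE.
func validatorMaxEffectiveBalance(creds []byte) uint64 {
	if hasCompoundingWithdrawalCredential(creds) {
		return maxEffectiveBalanceElectra
	}
	return minActivationBalance
}

func main() {
	fmt.Println(validatorMaxEffectiveBalance([]byte{0x02})) // 2048 ETH in Gwei
	fmt.Println(validatorMaxEffectiveBalance([]byte{0x01})) // 32 ETH in Gwei
}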

View File

@@ -13,6 +13,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -798,7 +799,7 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
v, err := state_native.NewValidator(tt.validator)
v, err := validator.NewValidator(tt.validator)
assert.NoError(t, err)
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(v, tt.currentEpoch), "IsEligibleForActivationQueue()")
})
@@ -1029,7 +1030,7 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
v, err := state_native.NewValidator(tt.validator)
v, err := validator.NewValidator(tt.validator)
require.NoError(t, err)
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
})
@@ -1101,7 +1102,7 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
v, err := state_native.NewValidator(tt.validator)
v, err := validator.NewValidator(tt.validator)
require.NoError(t, err)
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
})
@@ -1160,7 +1161,7 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
v, err := state_native.NewValidator(tt.validator)
v, err := validator.NewValidator(tt.validator)
require.NoError(t, err)
assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(v))
})

View File

@@ -120,7 +120,7 @@ func ComputeSigningRootForRoot(root [32]byte, domain []byte) ([32]byte, error) {
// ComputeDomainVerifySigningRoot computes domain and verifies signing root of an object given the beacon state, validator index and signature.
func ComputeDomainVerifySigningRoot(st state.ReadOnlyBeaconState, index primitives.ValidatorIndex, epoch primitives.Epoch, obj fssz.HashRoot, domain [4]byte, sig []byte) error {
v, err := st.ValidatorAtIndex(index)
v, err := st.ValidatorAtIndexReadOnly(index)
if err != nil {
return err
}
@@ -128,7 +128,8 @@ func ComputeDomainVerifySigningRoot(st state.ReadOnlyBeaconState, index primitiv
if err != nil {
return err
}
return VerifySigningRoot(obj, v.PublicKey, sig, d)
pk := v.PublicKey()
return VerifySigningRoot(obj, pk[:], sig, d)
}
// VerifySigningRoot verifies the signing root of an object given its public key, signature and domain.

View File

@@ -120,7 +120,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.ValidatorsReadOnly())
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}
@@ -129,7 +129,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
if err != nil {
return nil, err
}
scoresMissing := len(preState.Validators()) - len(scores)
scoresMissing := len(preState.ValidatorsReadOnly()) - len(scores)
if scoresMissing > 0 {
for i := 0; i < scoresMissing; i++ {
scores = append(scores, 0)

View File

@@ -147,7 +147,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.ValidatorsReadOnly())
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}

View File

@@ -333,7 +333,8 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err = b.ProcessPayload(state, blk.Body()); err != nil {
state, err = b.ProcessPayload(state, blk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not process execution data")
}
}
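The ProcessPayload change converts an in-place mutation into a value-returning call, so the caller must reassign the result. A toy illustration of the calling convention (not the real signatures):

package main

import "fmt"

type beaconState struct{ latestHeader string }

// processPayload returns the updated state value instead of mutating its
// argument in place, so the caller must keep the returned state.
func processPayload(st beaconState, payload string) (beaconState, error) {
	st.latestHeader = payload
	return st, nil
}

func main() {
	st := beaconState{latestHeader: "old"}
	st, err := processPayload(st, "new") // dropping the first return value would lose the update
	if err != nil {
		panic(err)
	}
	fmt.Println(st.latestHeader) // new
}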

View File

@@ -698,45 +698,3 @@ func TestProcessSlotsConditionally(t *testing.T) {
assert.Equal(t, primitives.Slot(6), s.Slot())
})
}
func BenchmarkProcessSlots_Capella(b *testing.B) {
st, _ := util.DeterministicGenesisStateCapella(b, params.BeaconConfig().MaxValidatorsPerCommittee)
var err error
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
}
}
}
func BenchmarkProcessSlots_Deneb(b *testing.B) {
st, _ := util.DeterministicGenesisStateDeneb(b, params.BeaconConfig().MaxValidatorsPerCommittee)
var err error
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
}
}
}
func BenchmarkProcessSlots_Electra(b *testing.B) {
st, _ := util.DeterministicGenesisStateElectra(b, params.BeaconConfig().MaxValidatorsPerCommittee)
var err error
b.ResetTimer()
for i := 0; i < b.N; i++ {
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
if err != nil {
b.Fatalf("Failed to process slot %v", err)
}
}
}

View File

@@ -17,9 +17,9 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -12,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -27,7 +27,7 @@ var ErrValidatorAlreadyExited = errors.New("validator already exited")
// epoch and the number of them
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
err := s.ReadFromEveryValidator(func(idx int, val interfaces.ReadOnlyValidator) error {
e := val.ExitEpoch()
if e != farFutureEpoch {
if e > maxExitEpoch {
@@ -217,11 +217,11 @@ func SlashValidator(
}
// ActivatedValidatorIndices determines the indices activated during the given epoch.
func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
func ActivatedValidatorIndices(epoch primitives.Epoch, validators []interfaces.ReadOnlyValidator) []primitives.ValidatorIndex {
activations := make([]primitives.ValidatorIndex, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ActivationEpoch <= epoch && epoch < val.ExitEpoch {
if val.ActivationEpoch() <= epoch && epoch < val.ExitEpoch() {
activations = append(activations, primitives.ValidatorIndex(i))
}
}
@@ -229,28 +229,50 @@ func ActivatedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Valid
}
// SlashedValidatorIndices determines the indices slashed during the given epoch.
func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) []primitives.ValidatorIndex {
func SlashedValidatorIndices(epoch primitives.Epoch, validators []interfaces.ReadOnlyValidator) []primitives.ValidatorIndex {
slashed := make([]primitives.ValidatorIndex, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
maxWithdrawableEpoch := primitives.MaxEpoch(val.WithdrawableEpoch, epoch+params.BeaconConfig().EpochsPerSlashingsVector)
if val.WithdrawableEpoch == maxWithdrawableEpoch && val.Slashed {
maxWithdrawableEpoch := primitives.MaxEpoch(val.WithdrawableEpoch(), epoch+params.BeaconConfig().EpochsPerSlashingsVector)
if val.WithdrawableEpoch() == maxWithdrawableEpoch && val.Slashed() {
slashed = append(slashed, primitives.ValidatorIndex(i))
}
}
return slashed
}
// ExitedValidatorIndices returns the indices of validators who exited during the specified epoch.
//
// A validator is considered to have exited during an epoch if its ExitEpoch equals that epoch;
// validators that have been ejected are excluded.
// This function simplifies the exit determination by directly checking the validator's ExitEpoch,
// avoiding the complexities and potential inaccuracies of calculating withdrawable epochs.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
// ExitedValidatorIndices determines the indices exited during the current epoch.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []interfaces.ReadOnlyValidator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
exited := make([]primitives.ValidatorIndex, 0)
exitEpochs := make([]primitives.Epoch, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch())
}
}
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
for _, val := range validators {
if val.ExitEpoch() == exitQueueEpoch {
exitQueueChurn++
}
}
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn < exitQueueChurn {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
if val.ExitEpoch() == epoch && val.WithdrawableEpoch() == withdrawableEpoch &&
val.EffectiveBalance() > params.BeaconConfig().EjectionBalance {
exited = append(exited, primitives.ValidatorIndex(i))
}
}
@@ -266,10 +288,10 @@ func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validato
// This function simplifies the ejection determination by directly checking the validator's ExitEpoch
// and EffectiveBalance, avoiding the complexities and potential inaccuracies of calculating
// withdrawable epochs.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
func EjectedValidatorIndices(epoch primitives.Epoch, validators []interfaces.ReadOnlyValidator) ([]primitives.ValidatorIndex, error) {
ejected := make([]primitives.ValidatorIndex, 0)
for i, val := range validators {
if val.ExitEpoch == epoch && val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
if val.ExitEpoch() == epoch && val.EffectiveBalance() <= params.BeaconConfig().EjectionBalance {
ejected = append(ejected, primitives.ValidatorIndex(i))
}
}
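The rewritten ExitedValidatorIndices recovers the exit queue epoch the same way validator exits are scheduled: take the highest requested exit epoch, count the validators already exiting then, and bump the epoch once that count overflows the churn limit. A self-contained sketch with illustrative mainnet-shaped constants:

package main

import "fmt"

const (
	minPerEpochChurnLimit            uint64 = 4
	churnLimitQuotient               uint64 = 65536
	minValidatorWithdrawabilityDelay uint64 = 256
)

// validatorExitChurnLimit mirrors the shape of the spec's churn limit.
func validatorExitChurnLimit(activeValidatorCount uint64) uint64 {
	churn := activeValidatorCount / churnLimitQuotient
	if churn < minPerEpochChurnLimit {
		churn = minPerEpochChurnLimit
	}
	return churn
}

func main() {
	// Exit epochs of validators whose ExitEpoch != FAR_FUTURE_EPOCH.
	exitEpochs := []uint64{100, 100, 100, 100, 100, 100, 100, 100, 42}
	activeValidatorCount := uint64(500_000) // churn limit: max(4, 500000/65536) = 7

	// The exit queue epoch is the highest requested exit epoch...
	exitQueueEpoch := uint64(0)
	for _, e := range exitEpochs {
		if e > exitQueueEpoch {
			exitQueueEpoch = e
		}
	}
	// ...bumped by one when the validators already scheduled there
	// exceed the per-epoch churn limit.
	exitQueueChurn := uint64(0)
	for _, e := range exitEpochs {
		if e == exitQueueEpoch {
			exitQueueChurn++
		}
	}
	if validatorExitChurnLimit(activeValidatorCount) < exitQueueChurn {
		exitQueueEpoch++
	}
	withdrawableEpoch := exitQueueEpoch + minValidatorWithdrawabilityDelay
	fmt.Println(exitQueueEpoch, withdrawableEpoch) // 101 357
}

A validator then counts as exited for the requested epoch only if its withdrawable epoch matches the derived value, which is what separates genuine exits from ejections in the new code.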

View File

@@ -389,16 +389,19 @@ func TestExitedValidatorIndices(t *testing.T) {
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 10,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
@@ -430,7 +433,11 @@ func TestExitedValidatorIndices(t *testing.T) {
},
}
for _, tt := range tests {
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators)
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
require.NoError(t, err)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
require.NoError(t, err)
assert.DeepEqual(t, tt.wanted, exitedIndices)
}

View File

@@ -23,10 +23,10 @@ import (
bolt "go.etcd.io/bbolt"
)
// Used to represent errors for inconsistent slot ranges.
// used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
// Block retrieval by root. Return nil if block is not found.
// Block retrieval by root.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
defer span.End()

View File

@@ -18,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
bolt "go.etcd.io/bbolt"
@@ -604,14 +603,14 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
// marshal versioned state from struct type down to bytes.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
switch st.Version() {
case version.Phase0:
switch st.ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconState)
if !ok {
return nil, errors.New("non valid inner state")
}
return encode(ctx, rState)
case version.Altair:
case *ethpb.BeaconStateAltair:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("non valid inner state")
@@ -624,7 +623,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
case version.Bellatrix:
case *ethpb.BeaconStateBellatrix:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errors.New("non valid inner state")
@@ -637,7 +636,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(bellatrixKey, rawObj...)), nil
case version.Capella:
case *ethpb.BeaconStateCapella:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
if !ok {
return nil, errors.New("non valid inner state")
@@ -650,7 +649,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case version.Deneb:
case *ethpb.BeaconStateDeneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
@@ -663,7 +662,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
case version.Electra:
case *ethpb.BeaconStateElectra:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
if !ok {
return nil, errors.New("non valid inner state")
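marshalState now switches on the concrete proto type returned by ToProtoUnsafe() rather than on the version enum, so the case label and the value being encoded can never disagree. The shape of the pattern, reduced to stand-in types:

package main

import "fmt"

type beaconStatePhase0 struct{}
type beaconStateAltair struct{}

// marshal dispatches on the concrete type, so the selected branch is
// guaranteed to match the value it encodes.
func marshal(inner interface{}) ([]byte, error) {
	switch inner.(type) {
	case *beaconStatePhase0:
		return []byte("phase0 encoding"), nil
	case *beaconStateAltair:
		return []byte("altair encoding"), nil
	default:
		return nil, fmt.Errorf("unsupported state type %T", inner)
	}
}

func main() {
	out, err := marshal(&beaconStateAltair{})
	fmt.Println(string(out), err)
}

With a binding type switch (switch s := inner.(type)) the per-case assertion would be redundant; the diff keeps the ok-checked assertion as an extra guard.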

View File

@@ -688,7 +688,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
// Encode attestation record to bytes.
// The output encoded attestation record consists of the signing root concatenated with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil || att.IndexedAttestation.IsNil() {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
}

View File

@@ -623,7 +623,13 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
continue
}
// Verify the sidecar KZG proof
v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
if err := v.SidecarKzgProofVerified(); err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
continue
}
verifiedBlob, err := v.VerifiedROBlob()
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
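The added lines gate blob reconstruction on an explicit KZG-proof check before promoting the blob to a verified type. The verify-then-promote idea can be sketched with hypothetical simplified types (the real verifier tracks a set of requirements such as verification.ELMemPoolRequirements):

package main

import (
	"errors"
	"fmt"
)

type roBlob struct {
	kzgProofOK bool
}

// verifier records which requirements have been satisfied before a blob
// may be promoted to its verified form.
type verifier struct {
	blob     roBlob
	verified bool
}

func (v *verifier) SidecarKzgProofVerified() error {
	if !v.blob.kzgProofOK {
		return errors.New("kzg proof check failed")
	}
	v.verified = true
	return nil
}

// VerifiedROBlob only succeeds after the checks have run, so downstream
// code can treat the returned type as proof of verification.
func (v *verifier) VerifiedROBlob() (roBlob, error) {
	if !v.verified {
		return roBlob{}, errors.New("blob not verified")
	}
	return v.blob, nil
}

func main() {
	v := &verifier{blob: roBlob{kzgProofOK: true}}
	if err := v.SidecarKzgProofVerified(); err != nil {
		fmt.Println("skip sidecar:", err)
		return
	}
	blob, err := v.VerifiedROBlob()
	fmt.Println(blob, err)
}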

View File

@@ -1769,9 +1769,7 @@ func fixturesStruct() *payloadFixtures {
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
ExecutionRequests: []hexutil.Bytes{depositRequestBytes, withdrawalRequestBytes, consolidationRequestBytes},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)

View File

@@ -53,7 +53,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("Could not check if block arrived early")
log.WithError(err).Error("could not check if block arrived early")
return
}
if early {

View File

@@ -192,13 +192,20 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
beacon.BackfillOpts = append(
beacon.BackfillOpts,
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
)
if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
return nil, errors.Wrap(err, "could not register services")
}
@@ -285,6 +292,11 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
return nil, errors.Wrap(err, "could not start slashing DB")
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not register P2P service")
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "could not create backfill updater")
@@ -303,15 +315,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
return bfs, nil
}
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bfs *backfill.Store) error {
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return errors.Wrap(err, "could not register P2P service")
}
log.Debugln("Registering Backfill Service")
if err := beacon.RegisterBackfillService(cliCtx, bfs); err != nil {
return errors.Wrap(err, "could not register Back Fill service")
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
if err := beacon.services.RegisterService(bf); err != nil {
return errors.Wrap(err, "could not register backfill service")
}
log.Debugln("Registering POW Chain Service")
@@ -1130,16 +1136,6 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
return b.services.RegisterService(svc)
}
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
if err != nil {
return errors.Wrap(err, "error initializing backfill service")
}
return b.services.RegisterService(bf)
}
func hasNetworkFlag(cliCtx *cli.Context) bool {
for _, flag := range features.NetworkFlags {
for _, name := range flag.Names() {
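The backfill refactor above builds the service in New by appending functional options (WithVerifierWaiter, WithInitSyncWaiter) before construction. The option pattern it relies on, in miniature and with hypothetical option names:

package main

import "fmt"

type Service struct {
	initSyncWaiter func() error
	batchSize      int
}

// ServiceOption mutates the service during construction; options compose
// by appending to a slice before NewService runs them in order.
type ServiceOption func(*Service) error

func WithInitSyncWaiter(w func() error) ServiceOption {
	return func(s *Service) error {
		s.initSyncWaiter = w
		return nil
	}
}

func WithBatchSize(n int) ServiceOption {
	return func(s *Service) error {
		s.batchSize = n
		return nil
	}
}

func NewService(opts ...ServiceOption) (*Service, error) {
	s := &Service{batchSize: 64} // defaults first, options override
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	opts := []ServiceOption{WithBatchSize(32)}
	opts = append(opts, WithInitSyncWaiter(func() error { return nil }))
	s, err := NewService(opts...)
	fmt.Println(s.batchSize, s.initSyncWaiter != nil, err) // 32 true <nil>
}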

View File

@@ -49,12 +49,12 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
{
name: "nil attestation",
att: nil,
wantErrString: "attestation is nil",
wantErrString: "attestation can't be nil",
},
{
name: "nil attestation data",
att: &ethpb.Attestation{},
wantErrString: "attestation is nil",
wantErrString: "attestation's data can't be nil",
},
{
name: "not aggregated",
@@ -206,7 +206,7 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
t.Run("nil attestation", func(t *testing.T) {
cache := NewAttCaches()
assert.ErrorContains(t, "attestation is nil", cache.DeleteAggregatedAttestation(nil))
assert.ErrorContains(t, "attestation can't be nil", cache.DeleteAggregatedAttestation(nil))
att := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10101}, Data: &ethpb.AttestationData{Slot: 2}})
assert.NoError(t, cache.DeleteAggregatedAttestation(att))
})
@@ -288,7 +288,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
name: "nil attestation",
input: nil,
want: false,
err: errors.New("is nil"),
err: errors.New("can't be nil"),
},
{
name: "nil attestation data",
@@ -296,7 +296,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
AggregationBits: bitfield.Bitlist{0b1111},
},
want: false,
err: errors.New("is nil"),
err: errors.New("can't be nil"),
},
{
name: "empty cache aggregated",

View File

@@ -8,7 +8,7 @@ import (
// SaveBlockAttestation saves a block attestation in the cache.
func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}
@@ -53,9 +53,10 @@ func (c *AttCaches) BlockAttestations() []ethpb.Att {
// DeleteBlockAttestation deletes a block attestation in cache.
func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")

View File

@@ -8,7 +8,7 @@ import (
// SaveForkchoiceAttestation saves a forkchoice attestation in the cache.
func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}
@@ -50,7 +50,7 @@ func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
// DeleteForkchoiceAttestation deletes a forkchoice attestation in cache.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}

View File

@@ -14,7 +14,7 @@ import (
// SaveUnaggregatedAttestation saves an unaggregated attestation in the cache.
func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}
if helpers.IsAggregated(att) {
@@ -130,10 +130,9 @@ func (c *AttCaches) UnaggregatedAttestationsBySlotIndexElectra(
// DeleteUnaggregatedAttestation deletes an unaggregated attestation from the cache.
func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}
if helpers.IsAggregated(att) {
return errors.New("attestation is aggregated")
}
@@ -162,7 +161,7 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
count := 0
for r, att := range c.unAggregatedAtt {
if att == nil || att.IsNil() || helpers.IsAggregated(att) {
if att == nil || helpers.IsAggregated(att) {
continue
}
if seen, err := c.hasSeenBit(att); err == nil && seen {

View File

@@ -7,7 +7,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/operations/attestations:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],

View File

@@ -3,17 +3,13 @@ package mock
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
var _ attestations.Pool = &PoolMock{}
// PoolMock --
type PoolMock struct {
AggregatedAtts []ethpb.Att
UnaggregatedAtts []ethpb.Att
AggregatedAtts []*ethpb.Attestation
}
// AggregateUnaggregatedAttestations --
@@ -27,18 +23,18 @@ func (*PoolMock) AggregateUnaggregatedAttestationsBySlotIndex(_ context.Context,
}
// SaveAggregatedAttestation --
func (*PoolMock) SaveAggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveAggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
// SaveAggregatedAttestations --
func (m *PoolMock) SaveAggregatedAttestations(atts []ethpb.Att) error {
func (m *PoolMock) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
m.AggregatedAtts = append(m.AggregatedAtts, atts...)
return nil
}
// AggregatedAttestations --
func (m *PoolMock) AggregatedAttestations() []ethpb.Att {
func (m *PoolMock) AggregatedAttestations() []*ethpb.Attestation {
return m.AggregatedAtts
}
@@ -47,18 +43,13 @@ func (*PoolMock) AggregatedAttestationsBySlotIndex(_ context.Context, _ primitiv
panic("implement me")
}
// AggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) AggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}
// DeleteAggregatedAttestation --
func (*PoolMock) DeleteAggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteAggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
// HasAggregatedAttestation --
func (*PoolMock) HasAggregatedAttestation(_ ethpb.Att) (bool, error) {
func (*PoolMock) HasAggregatedAttestation(_ *ethpb.Attestation) (bool, error) {
panic("implement me")
}
@@ -68,19 +59,18 @@ func (*PoolMock) AggregatedAttestationCount() int {
}
// SaveUnaggregatedAttestation --
func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveUnaggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
// SaveUnaggregatedAttestations --
func (m *PoolMock) SaveUnaggregatedAttestations(atts []ethpb.Att) error {
m.UnaggregatedAtts = append(m.UnaggregatedAtts, atts...)
return nil
func (*PoolMock) SaveUnaggregatedAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
}
// UnaggregatedAttestations --
func (m *PoolMock) UnaggregatedAttestations() ([]ethpb.Att, error) {
return m.UnaggregatedAtts, nil
func (*PoolMock) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
panic("implement me")
}
// UnaggregatedAttestationsBySlotIndex --
@@ -88,13 +78,8 @@ func (*PoolMock) UnaggregatedAttestationsBySlotIndex(_ context.Context, _ primit
panic("implement me")
}
// UnaggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) UnaggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}
// DeleteUnaggregatedAttestation --
func (*PoolMock) DeleteUnaggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteUnaggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
@@ -109,42 +94,42 @@ func (*PoolMock) UnaggregatedAttestationCount() int {
}
// SaveBlockAttestation --
func (*PoolMock) SaveBlockAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveBlockAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
// SaveBlockAttestations --
func (*PoolMock) SaveBlockAttestations(_ []ethpb.Att) error {
func (*PoolMock) SaveBlockAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
}
// BlockAttestations --
func (*PoolMock) BlockAttestations() []ethpb.Att {
func (*PoolMock) BlockAttestations() []*ethpb.Attestation {
panic("implement me")
}
// DeleteBlockAttestation --
func (*PoolMock) DeleteBlockAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteBlockAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
// SaveForkchoiceAttestation --
func (*PoolMock) SaveForkchoiceAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveForkchoiceAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}
// SaveForkchoiceAttestations --
func (*PoolMock) SaveForkchoiceAttestations(_ []ethpb.Att) error {
func (*PoolMock) SaveForkchoiceAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
}
// ForkchoiceAttestations --
func (*PoolMock) ForkchoiceAttestations() []ethpb.Att {
func (*PoolMock) ForkchoiceAttestations() []*ethpb.Attestation {
panic("implement me")
}
// DeleteForkchoiceAttestation --
func (*PoolMock) DeleteForkchoiceAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteForkchoiceAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

View File

@@ -17,6 +17,7 @@ go_library(
"handshake.go",
"info.go",
"interfaces.go",
"iterator.go",
"log.go",
"message_id.go",
"monitoring.go",
@@ -74,8 +75,6 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -163,10 +162,12 @@ go_test(
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

View File

@@ -225,11 +225,11 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()
// Use smaller batch size for testing.
currentBatchSize := batchSize
batchSize = 2
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
defer func() {
batchSize = currentBatchSize
pollingPeriod = currentPeriod
}()
bootNode := bootListener.Self()
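The test above shrinks two package-level knobs and restores them in a single defer, a common Go idiom for tests that touch globals. In miniature:

package main

import (
	"fmt"
	"time"
)

// Package-level knobs, as in the discovery code under test.
var (
	batchSize     = 50
	pollingPeriod = 6 * time.Second
)

func testWithSmallKnobs() {
	// Save the current values, shrink them for the test...
	currentBatchSize := batchSize
	currentPeriod := pollingPeriod
	batchSize = 2
	pollingPeriod = 1 * time.Second
	// ...and restore both in one defer so every exit path resets them.
	defer func() {
		batchSize = currentBatchSize
		pollingPeriod = currentPeriod
	}()

	fmt.Println("during test:", batchSize, pollingPeriod)
}

func main() {
	testWithSmallKnobs()
	fmt.Println("after test:", batchSize, pollingPeriod)
}

t.Cleanup would work equally well in a real test; a single defer keeps the save/restore pairs visibly adjacent.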

View File

@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
// Disallow bad peers from dialing in.
if s.peers.IsBad(pid) != nil {
if s.peers.IsBad(pid) {
return false
}
return filterConnections(s.addrFilter, m)

View File

@@ -50,7 +50,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
}()
for i := 0; i < highWatermarkBuffer; i++ {
addPeer(t, s.peers, peers.Connected, false)
addPeer(t, s.peers, peers.PeerConnected, false)
}
// create alternate host
@@ -159,7 +159,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
inboundLimit += 1
// Add in up to inbound peer limit.
for i := 0; i < int(inboundLimit); i++ {
addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
if valid {

View File

@@ -22,7 +22,6 @@ import (
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
type ListenerRebooter interface {
@@ -48,12 +47,10 @@ const (
udp6
)
const quickProtocolEnrKey = "quic"
type quicProtocol uint16
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
func (quicProtocol) ENRKey() string { return "quic" }
type listenerWrapper struct {
mu sync.RWMutex
@@ -136,129 +133,68 @@ func (l *listenerWrapper) RebootListener() error {
return nil
}
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine verifies and updates our attestation and sync committee subnets if they have been rotated.
func (s *Service) RefreshPersistentSubnets() {
// Return early if discv5 service isn't running.
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
// return early if discv5 isn't running
if s.dv5Listener == nil || !s.isInitialized() {
return
}
// Get the current epoch.
currentSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
currentEpoch := slots.ToEpoch(currentSlot)
// Get our node ID.
nodeID := s.dv5Listener.LocalNode().ID()
// Get our node record.
record := s.dv5Listener.Self().Record()
// Get the version of our metadata.
metadataVersion := s.Metadata().Version()
// Initialize persistent subnets.
if err := initializePersistentSubnets(nodeID, currentEpoch); err != nil {
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
if err := initializePersistentSubnets(s.dv5Listener.LocalNode().ID(), currEpoch); err != nil {
log.WithError(err).Error("Could not initialize persistent subnets")
return
}
// Get the current attestation subnet bitfield.
bitV := bitfield.NewBitvector64()
attestationCommittees := cache.SubnetIDs.GetAllSubnets()
for _, idx := range attestationCommittees {
committees := cache.SubnetIDs.GetAllSubnets()
for _, idx := range committees {
bitV.SetBitAt(idx, true)
}
// Get the attestation subnet bitfield we store in our record.
inRecordBitV, err := attBitvector(record)
currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
if err != nil {
log.WithError(err).Error("Could not retrieve att bitfield")
return
}
// Get the attestation subnet bitfield in our metadata.
inMetadataBitV := s.Metadata().AttnetsBitfield()
// Is our attestation bitvector record up to date?
isBitVUpToDate := bytes.Equal(bitV, inRecordBitV) && bytes.Equal(bitV, inMetadataBitV)
// Compare current epoch with Altair fork epoch
// Compare current epoch with our fork epochs
altairForkEpoch := params.BeaconConfig().AltairForkEpoch
if currentEpoch < altairForkEpoch {
switch {
case currEpoch < altairForkEpoch:
// Phase 0 behaviour.
if isBitVUpToDate {
// Return early if bitfield hasn't changed.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}
// Some data changed. Update the record and the metadata.
s.updateSubnetRecordWithMetadata(bitV)
// Ping all peers.
s.pingPeersAndLogEnr()
return
default:
// Retrieve sync subnets from application level
// cache.
bitS := bitfield.Bitvector4{byte(0x00)}
committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
for _, idx := range committees {
bitS.SetBitAt(idx, true)
}
currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
if err != nil {
log.WithError(err).Error("Could not retrieve sync bitfield")
return
}
if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
s.Metadata().Version() == version.Altair {
// return early if bitfields haven't changed
return
}
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
}
// Get the current sync subnet bitfield.
bitS := bitfield.Bitvector4{byte(0x00)}
syncCommittees := cache.SyncSubnetIDs.GetAllSubnets(currentEpoch)
for _, idx := range syncCommittees {
bitS.SetBitAt(idx, true)
}
// Get the sync subnet bitfield we store in our record.
inRecordBitS, err := syncBitvector(record)
if err != nil {
log.WithError(err).Error("Could not retrieve sync bitfield")
return
}
// Get the sync subnet bitfield in our metadata.
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
// Is our sync bitvector record up to date?
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
// Nothing to do, return early.
return
}
// Some data have changed, update our record and metadata.
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
// Ping all peers to inform them of new metadata
s.pingPeersAndLogEnr()
// ping all peers to inform them of new metadata
s.pingPeers()
}
// listenForNewNodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
const (
minLogInterval = 1 * time.Minute
thresholdLimit = 5
)
peersSummary := func(threshold uint) (uint, uint) {
// Retrieve how many active peers we have.
activePeers := s.Peers().Active()
activePeerCount := uint(len(activePeers))
// Compute how many peers we are missing to reach the threshold.
if activePeerCount >= threshold {
return activePeerCount, 0
}
missingPeerCount := threshold - activePeerCount
return activePeerCount, missingPeerCount
}
var lastLogTime time.Time
iterator := s.dv5Listener.RandomNodes()
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
defer iterator.Close()
connectivityTicker := time.NewTicker(1 * time.Minute)
thresholdCount := 0
@@ -267,31 +203,25 @@ func (s *Service) listenForNewNodes() {
select {
case <-s.ctx.Done():
return
case <-connectivityTicker.C:
// Skip the connectivity check if not enabled.
if !features.Get().EnableDiscoveryReboot {
continue
}
if !s.isBelowOutboundPeerThreshold() {
// Reset counter if we are beyond the threshold
thresholdCount = 0
continue
}
thresholdCount++
// Reboot listener if connectivity drops
if thresholdCount > thresholdLimit {
outBoundConnectedCount := len(s.peers.OutboundConnected())
log.WithField("outboundConnectionCount", outBoundConnectedCount).Warn("Rebooting discovery listener, reached threshold.")
if thresholdCount > 5 {
log.WithField("outboundConnectionCount", len(s.peers.OutboundConnected())).Warn("Rebooting discovery listener, reached threshold.")
if err := s.dv5Listener.RebootListener(); err != nil {
log.WithError(err).Error("Could not reboot listener")
continue
}
iterator = s.dv5Listener.RandomNodes()
iterator = filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
thresholdCount = 0
}
default:
@@ -302,35 +232,17 @@ func (s *Service) listenForNewNodes() {
time.Sleep(pollingPeriod)
continue
}
// Compute the number of new peers we want to dial.
activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)
fields := logrus.Fields{
"currentPeerCount": activePeerCount,
"targetPeerCount": s.cfg.MaxPeers,
}
if missingPeerCount == 0 {
wantedCount := s.wantedPeerDials()
if wantedCount == 0 {
log.Trace("Not looking for peers, at peer limit")
time.Sleep(pollingPeriod)
continue
}
if time.Since(lastLogTime) > minLogInterval {
lastLogTime = time.Now()
log.WithFields(fields).Debug("Searching for new active peers")
}
// Restrict dials if limit is applied.
if flags.MaxDialIsActive() {
maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
missingPeerCount = min(missingPeerCount, maxConcurrentDials)
wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
}
// Search for new peers.
wantedNodes := searchForPeers(iterator, batchSize, missingPeerCount, s.filterPeer)
wantedNodes := enode.ReadNodes(iterator, wantedCount)
wg := new(sync.WaitGroup)
for i := 0; i < len(wantedNodes); i++ {
node := wantedNodes[i]
@@ -540,14 +452,12 @@ func (s *Service) filterPeer(node *enode.Node) bool {
}
// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) != nil {
if s.peers.IsBad(peerData.ID) {
return false
}
// Ignore nodes that are already active.
if s.peers.IsActive(peerData.ID) {
// Constantly update enr for known peers
s.peers.UpdateENR(node.Record(), peerData.ID)
return false
}
@@ -616,6 +526,17 @@ func (s *Service) isBelowOutboundPeerThreshold() bool {
return outBoundCount < outBoundThreshold
}
func (s *Service) wantedPeerDials() int {
maxPeers := int(s.cfg.MaxPeers)
activePeers := len(s.Peers().Active())
wantedCount := 0
if maxPeers > activePeers {
wantedCount = maxPeers - activePeers
}
return wantedCount
}
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
var allAddrs []ma.Multiaddr
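RefreshPersistentSubnets (restored above) rebuilds the attnets Bitvector64 from the tracked subnets and compares it against both the copy in the ENR and the copy in metadata, re-publishing only on drift. A sketch of the comparison with a plain uint64 standing in for bitfield.Bitvector64:

package main

import "fmt"

// buildAttnets sets one bit per tracked attestation subnet, a stand-in for
// bitfield.Bitvector64 from prysmaticlabs/go-bitfield.
func buildAttnets(subnets []uint64) uint64 {
	var bits uint64
	for _, idx := range subnets {
		bits |= 1 << (idx % 64)
	}
	return bits
}

func main() {
	tracked := []uint64{1, 5, 40}
	inRecord := buildAttnets([]uint64{1, 5})   // what the ENR currently advertises
	inMetadata := buildAttnets([]uint64{1, 5}) // what our metadata advertises

	want := buildAttnets(tracked)
	upToDate := want == inRecord && want == inMetadata
	if !upToDate {
		// Re-publish the record, bump the metadata sequence number,
		// and ping peers so they re-fetch our metadata.
		fmt.Printf("updating attnets bitfield to %064b\n", want)
	}
}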

View File

@@ -16,8 +16,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,12 +30,13 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -132,10 +131,6 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}
func TestCreateLocalNode(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.Eip7594ForkEpoch = 1
params.OverrideBeaconConfig(cfg)
testCases := []struct {
name string
cfg *Config
@@ -383,14 +378,14 @@ func TestInboundPeerLimit(t *testing.T) {
}
for i := 0; i < 30; i++ {
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
for i := 0; i < highWatermarkBuffer; i++ {
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
@@ -409,13 +404,13 @@ func TestOutboundPeerThreshold(t *testing.T) {
}
for i := 0; i < 2; i++ {
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
}
require.Equal(t, true, s.isBelowOutboundPeerThreshold(), "not at outbound peer threshold")
for i := 0; i < 3; i++ {
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
}
require.Equal(t, false, s.isBelowOutboundPeerThreshold(), "still at outbound peer threshold")
@@ -482,7 +477,7 @@ func TestCorrectUDPVersion(t *testing.T) {
}
// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outbound bool) peer.ID {
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState, outbound bool) peer.ID {
// Set up some peers with different states
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
@@ -504,270 +499,192 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outb
return id
}
func createAndConnectPeer(t *testing.T, p2pService *testp2p.TestP2P, offset int) {
// Create the private key.
privateKeyBytes := make([]byte, 32)
for i := 0; i < 32; i++ {
privateKeyBytes[i] = byte(offset + i)
}
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
require.NoError(t, err)
// Create the peer.
peer := testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
// Add the peer and connect it.
p2pService.Peers().Add(&enr.Record{}, peer.PeerID(), nil, network.DirOutbound)
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
p2pService.Connect(peer)
}
// actualPingCount records the number of pings sent by the service under test.
var actualPingCount int
type check struct {
pingCount int
metadataSequenceNumber uint64
attestationSubnets []uint64
syncSubnets []uint64
custodySubnetCount *uint64
}
func checkPingCountCacheMetadataRecord(
t *testing.T,
service *Service,
expected check,
) {
// Check the ping count.
require.Equal(t, expected.pingCount, actualPingCount)
// Check the attestation subnets in the cache.
actualAttestationSubnets := cache.SubnetIDs.GetAllSubnets()
require.DeepSSZEqual(t, expected.attestationSubnets, actualAttestationSubnets)
// Check the metadata sequence number.
actualMetadataSequenceNumber := service.metaData.SequenceNumber()
require.Equal(t, expected.metadataSequenceNumber, actualMetadataSequenceNumber)
// Compute the expected attestation subnet bits.
expectedBitV := bitfield.NewBitvector64()
exists := false
for _, idx := range expected.attestationSubnets {
exists = true
expectedBitV.SetBitAt(idx, true)
}
// Check attnets in ENR.
var actualBitVENR bitfield.Bitvector64
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(attSubnetEnrKey, &actualBitVENR))
require.NoError(t, err)
require.DeepSSZEqual(t, expectedBitV, actualBitVENR)
// Check attnets in metadata.
if !exists {
expectedBitV = nil
}
actualBitVMetadata := service.metaData.AttnetsBitfield()
require.DeepSSZEqual(t, expectedBitV, actualBitVMetadata)
if expected.syncSubnets != nil {
// Compute the expected sync subnet bits.
expectedBitS := bitfield.NewBitvector4()
exists = false
for _, idx := range expected.syncSubnets {
exists = true
expectedBitS.SetBitAt(idx, true)
}
// Check syncnets in ENR.
var actualBitSENR bitfield.Bitvector4
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, &actualBitSENR))
require.NoError(t, err)
require.DeepSSZEqual(t, expectedBitS, actualBitSENR)
// Check syncnets in metadata.
if !exists {
expectedBitS = nil
}
actualBitSMetadata := service.metaData.SyncnetsBitfield()
require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
}
}
func TestRefreshPersistentSubnets(t *testing.T) {
func TestRefreshENR_ForkBoundaries(t *testing.T) {
params.SetupTestConfigCleanup(t)
// Clean up caches after usage.
defer cache.SubnetIDs.EmptyAllCaches()
defer cache.SyncSubnetIDs.EmptyAllCaches()
const (
altairForkEpoch = 5
eip7594ForkEpoch = 10
)
// Set up epochs.
defaultCfg := params.BeaconConfig()
cfg := defaultCfg.Copy()
cfg.AltairForkEpoch = altairForkEpoch
cfg.Eip7594ForkEpoch = eip7594ForkEpoch
params.OverrideBeaconConfig(cfg)
// Compute the number of seconds per epoch.
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
secondsPerEpoch := secondsPerSlot * uint64(slotsPerEpoch)
testCases := []struct {
name string
epochSinceGenesis uint64
checks []check
tests := []struct {
name string
svcBuilder func(t *testing.T) *Service
postValidation func(t *testing.T, s *Service)
}{
{
name: "Phase0",
epochSinceGenesis: 0,
checks: []check{
{
pingCount: 0,
metadataSequenceNumber: 0,
attestationSubnets: []uint64{},
},
{
pingCount: 1,
metadataSequenceNumber: 1,
attestationSubnets: []uint64{40, 41},
},
{
pingCount: 1,
metadataSequenceNumber: 1,
attestationSubnets: []uint64{40, 41},
},
{
pingCount: 1,
metadataSequenceNumber: 1,
attestationSubnets: []uint64{40, 41},
},
name: "metadata no change",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
createListener := func() (*discover.UDPv5, error) {
return s.createListener(ipAddr, pkey)
}
listener, err := newListener(createListener)
assert.NoError(t, err)
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
return s
},
postValidation: func(t *testing.T, s *Service) {
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
assert.NoError(t, err)
bitV := bitfield.NewBitvector64()
for _, idx := range subs {
bitV.SetBitAt(idx, true)
}
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
},
},
{
name: "Altair",
epochSinceGenesis: altairForkEpoch,
checks: []check{
{
pingCount: 0,
metadataSequenceNumber: 0,
attestationSubnets: []uint64{},
syncSubnets: nil,
},
{
pingCount: 1,
metadataSequenceNumber: 1,
attestationSubnets: []uint64{40, 41},
syncSubnets: nil,
},
{
pingCount: 2,
metadataSequenceNumber: 2,
attestationSubnets: []uint64{40, 41},
syncSubnets: []uint64{1, 2},
},
{
pingCount: 2,
metadataSequenceNumber: 2,
attestationSubnets: []uint64{40, 41},
syncSubnets: []uint64{1, 2},
},
name: "metadata updated",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
createListener := func() (*discover.UDPv5, error) {
return s.createListener(ipAddr, pkey)
}
listener, err := newListener(createListener)
assert.NoError(t, err)
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated at fork epoch",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
createListener := func() (*discover.UDPv5, error) {
return s.createListener(ipAddr, pkey)
}
listener, err := newListener(createListener)
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.Equal(t, version.Altair, s.metaData.Version())
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated at fork epoch with no bitfield",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
createListener := func() (*discover.UDPv5, error) {
return s.createListener(ipAddr, pkey)
}
listener, err := newListener(createListener)
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.Equal(t, version.Altair, s.metaData.Version())
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
assert.NoError(t, err)
bitV := bitfield.NewBitvector64()
for _, idx := range subs {
bitV.SetBitAt(idx, true)
}
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
},
},
{
name: "metadata updated past fork epoch with bitfields",
svcBuilder: func(t *testing.T) *Service {
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
genesisTime: time.Now().Add(-6 * oneEpochDuration()),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
createListener := func() (*discover.UDPv5, error) {
return s.createListener(ipAddr, pkey)
}
listener, err := newListener(createListener)
assert.NoError(t, err)
// Update params
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
return s
},
postValidation: func(t *testing.T, s *Service) {
assert.Equal(t, version.Altair, s.metaData.Version())
assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const peerOffset = 1
// Initialize the ping count.
actualPingCount = 0
// Create the private key.
privateKeyBytes := make([]byte, 32)
for i := 0; i < 32; i++ {
privateKeyBytes[i] = byte(i)
}
unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
require.NoError(t, err)
privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
require.NoError(t, err)
// Create a p2p service.
p2p := testp2p.NewTestP2P(t)
// Create and connect a peer.
createAndConnectPeer(t, p2p, peerOffset)
// Create a service.
service := &Service{
pingMethod: func(_ context.Context, _ peer.ID) error {
actualPingCount++
return nil
},
cfg: &Config{UDPPort: 2000},
peers: p2p.Peers(),
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
// Set the listener and the metadata.
createListener := func() (*discover.UDPv5, error) {
return service.createListener(nil, privateKey)
}
listener, err := newListener(createListener)
require.NoError(t, err)
service.dv5Listener = listener
service.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
// Run a check.
checkPingCountCacheMetadataRecord(t, service, tc.checks[0])
// Refresh the persistent subnets.
service.RefreshPersistentSubnets()
time.Sleep(10 * time.Millisecond)
// Run a check.
checkPingCountCacheMetadataRecord(t, service, tc.checks[1])
// Add a sync committee subnet.
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'a'}, altairForkEpoch, []uint64{1, 2}, 1*time.Hour)
// Refresh the persistent subnets.
service.RefreshPersistentSubnets()
time.Sleep(10 * time.Millisecond)
// Run a check.
checkPingCountCacheMetadataRecord(t, service, tc.checks[2])
// Refresh the persistent subnets.
service.RefreshPersistentSubnets()
time.Sleep(10 * time.Millisecond)
// Run a check.
checkPingCountCacheMetadataRecord(t, service, tc.checks[3])
// Clean the test.
service.dv5Listener.Close()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcBuilder(t)
s.RefreshENR()
tt.postValidation(t, s)
s.dv5Listener.Close()
cache.SubnetIDs.EmptyAllCaches()
cache.SyncSubnetIDs.EmptyAllCaches()
})
}
// Reset the config.
params.OverrideBeaconConfig(defaultCfg)
}

View File

@@ -2,6 +2,7 @@ package p2p
import (
"context"
"errors"
"fmt"
"io"
"sync"
@@ -9,7 +10,6 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -25,46 +25,6 @@ func peerMultiaddrString(conn network.Conn) string {
return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
}
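For reference, peerMultiaddrString renders a connection in its canonical dialable form. With made-up values (illustrative only):

// Hypothetical remote at 192.0.2.7:13000:
// peerMultiaddrString(conn) == "/ip4/192.0.2.7/tcp/13000/p2p/16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR"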
func (s *Service) connectToPeer(conn network.Conn) {
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
// Go through the handshake process.
log.WithFields(logrus.Fields{
"direction": conn.Stat().Direction.String(),
"multiAddr": peerMultiaddrString(conn),
"activePeers": len(s.peers.Active()),
}).Debug("Initiate peer connection")
}
func (s *Service) disconnectFromPeerOnError(
conn network.Conn,
goodByeFunc func(ctx context.Context, id peer.ID) error,
badPeerErr error,
) {
// Get the remote peer ID.
remotePeerID := conn.RemotePeer()
// Set the peer to disconnecting state.
s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
// Only attempt a goodbye if we are still connected to the peer.
if s.host.Network().Connectedness(remotePeerID) == network.Connected {
if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
log.WithError(err).Error("Unable to disconnect from peer")
}
}
log.
WithError(badPeerErr).
WithFields(logrus.Fields{
"multiaddr": peerMultiaddrString(conn),
"direction": conn.Stat().Direction.String(),
"remainingActivePeers": len(s.peers.Active()),
}).
Debug("Initiate peer disconnection")
s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
}
// AddConnectionHandler adds a callback function which handles the connection with a
// newly added peer. It performs a handshake with that peer by sending a hello request
// and validating the response from the peer.
@@ -97,9 +57,18 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
}
s.host.Network().Notify(&network.NotifyBundle{
ConnectedF: func(_ network.Network, conn network.Conn) {
ConnectedF: func(net network.Network, conn network.Conn) {
remotePeer := conn.RemotePeer()
disconnectFromPeer := func() {
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
// Only attempt a goodbye if we are still connected to the peer.
if s.host.Network().Connectedness(remotePeer) == network.Connected {
if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
log.WithError(err).Error("Unable to disconnect from peer")
}
}
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
}
// Connection handler must be non-blocking as part of libp2p design.
go func() {
if peerHandshaking(remotePeer) {
@@ -108,21 +77,28 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
return
}
defer peerFinished(remotePeer)
// Handle the various pre-existing conditions that will result in us not handshaking.
peerConnectionState, err := s.peers.ConnectionState(remotePeer)
if err == nil && (peerConnectionState == peers.Connected || peerConnectionState == peers.Connecting) {
if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
return
}
s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)
// Defensive check in the event we still get a bad peer.
if err := s.peers.IsBad(remotePeer); err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
if s.peers.IsBad(remotePeer) {
log.WithField("reason", "bad peer").Trace("Ignoring connection request")
disconnectFromPeer()
return
}
validPeerConnection := func() {
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
// Go through the handshake process.
log.WithFields(logrus.Fields{
"direction": conn.Stat().Direction,
"multiAddr": peerMultiaddrString(conn),
"activePeers": len(s.peers.Active()),
}).Debug("Peer connected")
}
// Do not perform handshake on inbound dials.
if conn.Stat().Direction == network.DirInbound {
@@ -141,80 +117,63 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
// If the peer hasn't sent a status request, we disconnect from them
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
statusMessageMissing.Inc()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
disconnectFromPeer()
return
}
if peerExists {
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
if err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
disconnectFromPeer()
return
}
// Exit if we don't receive any current status messages from the peer.
if updated.IsZero() {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
return
}
if updated.Before(currentTime) {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
// Exit if we don't receive any current status messages from the peer.
if updated.IsZero() || !updated.After(currentTime) {
disconnectFromPeer()
return
}
}
s.connectToPeer(conn)
validPeerConnection()
return
}
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
log.WithError(err).Trace("Handshake failed")
disconnectFromPeer()
return
}
s.connectToPeer(conn)
validPeerConnection()
}()
},
})
}
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
s.host.Network().Notify(&network.NotifyBundle{
DisconnectedF: func(net network.Network, conn network.Conn) {
peerID := conn.RemotePeer()
log.WithFields(logrus.Fields{
"multiAddr": peerMultiaddrString(conn),
"direction": conn.Stat().Direction.String(),
})
log := log.WithField("multiAddr", peerMultiaddrString(conn))
// Must be handled in a goroutine as this callback cannot be blocking.
go func() {
// Exit early if we are still connected to the peer.
if net.Connectedness(peerID) == network.Connected {
if net.Connectedness(conn.RemotePeer()) == network.Connected {
return
}
priorState, err := s.peers.ConnectionState(peerID)
priorState, err := s.peers.ConnectionState(conn.RemotePeer())
if err != nil {
// This can happen if the peer has already disconnected, so fall back to the disconnected state.
priorState = peers.Disconnected
priorState = peers.PeerDisconnected
}
s.peers.SetConnectionState(peerID, peers.Disconnecting)
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
log.WithError(err).Error("Disconnect handler failed")
}
s.peers.SetConnectionState(peerID, peers.Disconnected)
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
// Only log disconnections if we were fully connected.
if priorState == peers.Connected {
activePeersCount := len(s.peers.Active())
log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
if priorState == peers.PeerConnected {
log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
}
}()
},

View File

@@ -82,7 +82,7 @@ type PeerManager interface {
Host() host.Host
ENR() *enr.Record
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshPersistentSubnets()
RefreshENR()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

View File

@@ -0,0 +1,36 @@
package p2p
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
)
// filterNodes wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true. This custom implementation also
// checks for context deadlines so that in the event the parent context has
// expired, we exit from the search rather than perform more network
// lookups for additional peers.
func filterNodes(ctx context.Context, it enode.Iterator, check func(*enode.Node) bool) enode.Iterator {
return &filterIter{ctx, it, check}
}
type filterIter struct {
context.Context
enode.Iterator
check func(*enode.Node) bool
}
// Next advances to the next node that satisfies our filter criteria.
func (f *filterIter) Next() bool {
for f.Iterator.Next() {
if f.Context.Err() != nil {
return false
}
if f.check(f.Node()) {
return true
}
}
return false
}
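A usage sketch (hypothetical caller; baseIter stands in for any enode.Iterator, such as one returned by discv5's RandomNodes):

// Wrap a discovery iterator so lookups stop once ctx expires or the
// filter rejects a node.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
it := filterNodes(ctx, baseIter, func(n *enode.Node) bool {
	return n.TCP() != 0 // keep only nodes advertising a TCP port
})
for it.Next() {
	node := it.Node() // guaranteed to have passed the check
	_ = node          // dial or inspect the node here
}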

View File

@@ -29,7 +29,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
return string(msg)
}
digest, err := ExtractGossipDigest(*pmsg.Topic)
if err != nil {
@@ -37,7 +37,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
return string(msg)
}
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
if err != nil {
@@ -45,7 +45,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
// never be hit.
msg := make([]byte, 20)
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
return string(msg)
}
if fEpoch >= params.BeaconConfig().AltairForkEpoch {
return postAltairMsgID(pmsg, fEpoch)
@@ -54,11 +54,11 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
if err != nil {
combinedData := append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pmsg.Data...)
h := hash.Hash(combinedData)
return bytesutil.UnsafeCastToString(h[:20])
return string(h[:20])
}
combinedData := append(params.BeaconConfig().MessageDomainValidSnappy[:], decodedData...)
h := hash.Hash(combinedData)
return bytesutil.UnsafeCastToString(h[:20])
return string(h[:20])
}
// Spec:
@@ -93,13 +93,13 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
return string(msg)
}
if uint64(totalLength) > gossipPubSubSize {
// this should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
return string(msg)
}
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconConfig().MessageDomainInvalidSnappy[:]...)
@@ -107,7 +107,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, pmsg.Data...)
h := hash.Hash(combinedData)
return bytesutil.UnsafeCastToString(h[:20])
return string(h[:20])
}
totalLength, err := math.AddInt(
len(params.BeaconConfig().MessageDomainValidSnappy),
@@ -120,7 +120,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
return string(msg)
}
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconConfig().MessageDomainValidSnappy[:]...)
@@ -128,5 +128,5 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
combinedData = append(combinedData, topic...)
combinedData = append(combinedData, decodedData...)
h := hash.Hash(combinedData)
return bytesutil.UnsafeCastToString(h[:20])
return string(h[:20])
}
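Each hunk in this file swaps the zero-copy bytesutil.UnsafeCastToString helper for a plain string conversion, which copies the 20-byte digest. For context, a sketch of what such a zero-copy cast looks like on Go 1.20+ (an assumption about the removed helper, not a confirmed copy of it):

// unsafeCastToString shares b's backing array instead of copying it.
// The caller must never mutate b afterwards, or the string changes too.
func unsafeCastToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

The plain string(...) conversion trades a small allocation for safety: the returned message ID can never be corrupted by later writes to the byte slice.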

View File

@@ -23,8 +23,8 @@ var (
ErrNoPeerStatus = errors.New("no chain status for peer")
)
// ConnectionState is the state of the connection.
type ConnectionState ethpb.ConnectionState
// PeerConnectionState is the state of the connection.
type PeerConnectionState ethpb.ConnectionState
// StoreConfig holds peer store parameters.
type StoreConfig struct {
@@ -49,7 +49,7 @@ type PeerData struct {
// Network related data.
Address ma.Multiaddr
Direction network.Direction
ConnState ConnectionState
ConnState PeerConnectionState
Enr *enr.Record
NextValidTime time.Time
// Chain related data.
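Because PeerConnectionState is a defined type over the protobuf enum rather than a type alias, call sites convert explicitly, as the test updates elsewhere in this diff show:

// Explicit conversion from the protobuf enum into the peerdata type.
state := peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED)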

View File

@@ -20,7 +20,6 @@ go_library(
"//crypto/rand:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -4,7 +4,6 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
)
@@ -62,7 +61,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) != nil {
if s.isBadPeerNoLock(pid) {
return BadPeerScore
}
score := float64(0)
@@ -117,24 +116,18 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
// IsBadPeer states if the peer is to be considered bad.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.BadResponses >= s.config.Threshold {
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
}
return nil
return peerData.BadResponses >= s.config.Threshold
}
return nil
return false
}
// BadPeers returns the peers that are considered bad.
@@ -144,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) != nil {
if s.isBadPeerNoLock(pid) {
badPeers = append(badPeers, pid)
}
}
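This error-to-bool signature change recurs across every scorer in the diff; at call sites it collapses wrapped-error checks back into plain boolean tests. A before/after sketch (pid and scorer are assumed to be in scope):

// Before: the returned error carried the reason the peer was bad.
if err := scorer.IsBadPeer(pid); err != nil {
	log.WithError(err).Debug("Rejecting bad peer")
}

// After: callers learn only that the peer is bad, not why.
if scorer.IsBadPeer(pid) {
	log.Debug("Rejecting bad peer")
}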

View File

@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, false, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.NotNil(t, scorer.IsBadPeer(pid))
assert.Equal(t, true, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
}
@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pid := peer.ID("peer1")
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, false, scorer.IsBadPeer(pid))
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, false, scorer.IsBadPeer(pid))
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pid)
if i == scorers.DefaultBadResponsesThreshold-1 {
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
} else {
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
}
}
}
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
scorer.Increment(pids[2])
scorer.Increment(pids[4])
}
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
want := []peer.ID{pids[1], pids[2], pids[4]}
badPeers := scorer.BadPeers()
sort.Slice(badPeers, func(i, j int) bool {

View File

@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
return nil
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
}
// BadPeers returns the peers that are considered bad.

View File

@@ -119,7 +119,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(*testing.T) {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
@@ -224,7 +224,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
}{
{
name: "no peers",
update: func(*scorers.BlockProviderScorer) {},
update: func(s *scorers.BlockProviderScorer) {},
have: []peer.ID{},
want: []peer.ID{},
},
@@ -451,7 +451,7 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
})
}
for _, tt := range tests {
t.Run(tt.name, func(*testing.T) {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
if tt.update != nil {
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
scorer.IncrementProcessedBlocks("peer1", 64)
assert.NoError(t, scorer.IsBadPeer("peer1"))
assert.Equal(t, false, scorer.IsBadPeer("peer1"))
assert.Equal(t, 0, len(scorer.BadPeers()))
}

View File

@@ -2,7 +2,6 @@ package scorers
import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -52,24 +51,19 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
}
// IsBadPeer states if the peer is to be considered bad.
func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return nil
return false
}
if peerData.GossipScore < gossipThreshold {
return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
}
return nil
return peerData.GossipScore < gossipThreshold
}
// BadPeers returns the peers that are considered bad.
@@ -79,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) != nil {
if s.isBadPeerNoLock(pid) {
badPeers = append(badPeers, pid)
}
}

View File

@@ -21,7 +21,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
}{
{
name: "nonexistent peer",
update: func(*scorers.GossipScorer) {
update: func(scorer *scorers.GossipScorer) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
assert.NotNil(t, scorer.IsBadPeer("peer1"), "Unexpected good peer")
assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
},
},
{
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -53,7 +53,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(*testing.T) {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})

View File

@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
if s.isBadPeerNoLock(pid) != nil {
if s.isBadPeerNoLock(pid) {
return BadPeerScore
}
score := float64(0)
@@ -67,34 +67,30 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
}
// IsBadPeer states if the peer is to be considered bad.
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) error {
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeerNoLock(pid)
}
// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) error {
func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return nil
return false
}
// Mark the peer as bad if the latest error is one of the terminal ones.
terminalErrs := []error{
p2ptypes.ErrWrongForkDigestVersion,
p2ptypes.ErrInvalidFinalizedRoot,
p2ptypes.ErrInvalidRequest,
}
for _, err := range terminalErrs {
if errors.Is(peerData.ChainStateValidationError, err) {
return err
return true
}
}
return nil
return false
}
// BadPeers returns the peers that are considered bad.
@@ -104,7 +100,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeerNoLock(pid) != nil {
if s.isBadPeerNoLock(pid) {
badPeers = append(badPeers, pid)
}
}
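The terminal-error matching above relies on errors.Is, so a wrapped chain-state validation error still flags the peer as bad. A minimal standalone illustration (assumed snippet, not from the diff):

// errors.Is unwraps, so wrapping does not hide a terminal error.
err := fmt.Errorf("status handshake: %w", p2ptypes.ErrWrongForkDigestVersion)
fmt.Println(errors.Is(err, p2ptypes.ErrWrongForkDigestVersion)) // true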

View File

@@ -122,7 +122,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(*testing.T) {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
ScorerParams: &scorers.Config{},
})
pid := peer.ID("peer1")
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
pid1 := peer.ID("peer1")
pid2 := peer.ID("peer2")
pid3 := peer.ID("peer3")
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v5/config/features"
)
@@ -25,7 +24,7 @@ const BadPeerScore = gossipThreshold
// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
Score(pid peer.ID) float64
IsBadPeer(pid peer.ID) error
IsBadPeer(pid peer.ID) bool
BadPeers() []peer.ID
}
@@ -125,29 +124,26 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
}
// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
func (s *Service) IsBadPeer(pid peer.ID) error {
func (s *Service) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.IsBadPeerNoLock(pid)
}
// IsBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *Service) IsBadPeerNoLock(pid peer.ID) error {
if err := s.scorers.badResponsesScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "bad responses scorer")
func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
return true
}
if err := s.scorers.peerStatusScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "peer status scorer")
if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
return true
}
if features.Get().EnablePeerScorer {
if err := s.scorers.gossipScorer.isBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "gossip scorer")
if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
return true
}
}
return nil
return false
}
// BadPeers returns the peers that are considered bad by any of registered scorers.
@@ -157,7 +153,7 @@ func (s *Service) BadPeers() []peer.ID {
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.IsBadPeerNoLock(pid) != nil {
if s.IsBadPeerNoLock(pid) {
badPeers = append(badPeers, pid)
}
}

View File

@@ -100,7 +100,7 @@ func TestScorers_Service_Score(t *testing.T) {
return scores
}
pack := func(_ *scorers.Service, s1, s2, s3 float64) map[string]float64 {
pack := func(scorer *scorers.Service, s1, s2, s3 float64) map[string]float64 {
return map[string]float64{
"peer1": roundScore(s1),
"peer2": roundScore(s2),
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")
s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
}()
<-done
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
},
})
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
}
func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
},
})
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

View File

@@ -34,7 +34,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -50,14 +49,14 @@ import (
)
const (
// Disconnected means there is no connection to the peer.
Disconnected peerdata.ConnectionState = iota
// Disconnecting means there is an on-going attempt to disconnect from the peer.
Disconnecting
// Connected means the peer has an active connection.
Connected
// Connecting means there is an on-going attempt to connect to the peer.
Connecting
// PeerDisconnected means there is no connection to the peer.
PeerDisconnected peerdata.PeerConnectionState = iota
// PeerDisconnecting means there is an on-going attempt to disconnect from the peer.
PeerDisconnecting
// PeerConnected means the peer has an active connection.
PeerConnected
// PeerConnecting means there is an on-going attempt to connect to the peer.
PeerConnecting
)
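The renamed constants keep the same iota ordering as the un-prefixed ones they replace, so the underlying values are unchanged. A quick check (hypothetical snippet):

// Prints "0 1 2 3": the ordinals match the old Disconnected..Connecting block.
fmt.Println(peers.PeerDisconnected, peers.PeerDisconnecting, peers.PeerConnected, peers.PeerConnecting)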
const (
@@ -118,15 +117,6 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
}
}
func (p *Status) UpdateENR(record *enr.Record, pid peer.ID) {
p.store.Lock()
defer p.store.Unlock()
if peerData, ok := p.store.PeerData(pid); ok {
peerData.Enr = record
}
}
// Scorers exposes peer scoring management service.
func (p *Status) Scorers() *scorers.Service {
return p.scorers
@@ -160,7 +150,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
Address: address,
Direction: direction,
// Peers start disconnected; state will be updated when the handshake process begins.
ConnState: Disconnected,
ConnState: PeerDisconnected,
}
if record != nil {
peerData.Enr = record
@@ -222,7 +212,7 @@ func (p *Status) IsActive(pid peer.ID) bool {
defer p.store.RUnlock()
peerData, ok := p.store.PeerData(pid)
return ok && (peerData.ConnState == Connected || peerData.ConnState == Connecting)
return ok && (peerData.ConnState == PeerConnected || peerData.ConnState == PeerConnecting)
}
// IsAboveInboundLimit checks if we are above our current inbound
@@ -232,7 +222,7 @@ func (p *Status) IsAboveInboundLimit() bool {
defer p.store.RUnlock()
totalInbound := 0
for _, peerData := range p.store.Peers() {
if peerData.ConnState == Connected &&
if peerData.ConnState == PeerConnected &&
peerData.Direction == network.DirInbound {
totalInbound += 1
}
@@ -296,7 +286,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
// look at active peers
connectedStatus := peerData.ConnState == Connecting || peerData.ConnState == Connected
connectedStatus := peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected
if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
for _, idx := range indices {
@@ -311,7 +301,7 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
}
// SetConnectionState sets the connection state of the given remote peer.
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.ConnectionState) {
func (p *Status) SetConnectionState(pid peer.ID, state peerdata.PeerConnectionState) {
p.store.Lock()
defer p.store.Unlock()
@@ -321,14 +311,14 @@ func (p *Status) SetConnectionState(pid peer.ID, state peerdata.ConnectionState)
// ConnectionState gets the connection state of the given remote peer.
// This will error if the peer does not exist.
func (p *Status) ConnectionState(pid peer.ID) (peerdata.ConnectionState, error) {
func (p *Status) ConnectionState(pid peer.ID) (peerdata.PeerConnectionState, error) {
p.store.RLock()
defer p.store.RUnlock()
if peerData, ok := p.store.PeerData(pid); ok {
return peerData.ConnState, nil
}
return Disconnected, peerdata.ErrPeerUnknown
return PeerDisconnected, peerdata.ErrPeerUnknown
}
// ChainStateLastUpdated gets the last time the chain state of the given remote peer was updated.
@@ -345,29 +335,19 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) error {
func (p *Status) IsBad(pid peer.ID) bool {
p.store.RLock()
defer p.store.RUnlock()
return p.isBad(pid)
}
// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) error {
func (p *Status) isBad(pid peer.ID) bool {
// Do not disconnect from trusted peers.
if p.store.IsTrustedPeer(pid) {
return nil
return false
}
if err := p.isfromBadIP(pid); err != nil {
return errors.Wrap(err, "peer is from a bad IP")
}
if err := p.scorers.IsBadPeerNoLock(pid); err != nil {
return errors.Wrap(err, "is bad peer no lock")
}
return nil
return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
}
// NextValidTime gets the earliest possible time it is to contact/dial
@@ -431,7 +411,7 @@ func (p *Status) Connecting() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connecting {
if peerData.ConnState == PeerConnecting {
peers = append(peers, pid)
}
}
@@ -444,7 +424,7 @@ func (p *Status) Connected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected {
if peerData.ConnState == PeerConnected {
peers = append(peers, pid)
}
}
@@ -470,7 +450,7 @@ func (p *Status) InboundConnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound {
peers = append(peers, pid)
}
}
@@ -483,7 +463,7 @@ func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
@@ -509,7 +489,7 @@ func (p *Status) OutboundConnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound {
peers = append(peers, pid)
}
}
@@ -522,7 +502,7 @@ func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
@@ -535,7 +515,7 @@ func (p *Status) Active() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connecting || peerData.ConnState == Connected {
if peerData.ConnState == PeerConnecting || peerData.ConnState == PeerConnected {
peers = append(peers, pid)
}
}
@@ -548,7 +528,7 @@ func (p *Status) Disconnecting() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Disconnecting {
if peerData.ConnState == PeerDisconnecting {
peers = append(peers, pid)
}
}
@@ -561,7 +541,7 @@ func (p *Status) Disconnected() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Disconnected {
if peerData.ConnState == PeerDisconnected {
peers = append(peers, pid)
}
}
@@ -574,7 +554,7 @@ func (p *Status) Inactive() []peer.ID {
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Disconnecting || peerData.ConnState == Disconnected {
if peerData.ConnState == PeerDisconnecting || peerData.ConnState == PeerDisconnected {
peers = append(peers, pid)
}
}
@@ -612,7 +592,7 @@ func (p *Status) Prune() {
return
}
notBadPeer := func(pid peer.ID) bool {
return p.isBad(pid) == nil
return !p.isBad(pid)
}
notTrustedPeer := func(pid peer.ID) bool {
return !p.isTrustedPeers(pid)
@@ -625,7 +605,7 @@ func (p *Status) Prune() {
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Do not prune a trusted peer, nor prune its peer data and unset its trusted status.
if peerData.ConnState == Disconnected && notBadPeer(pid) && notTrustedPeer(pid) {
if peerData.ConnState == PeerDisconnected && notBadPeer(pid) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.Scorers().ScoreNoLock(pid),
@@ -677,7 +657,7 @@ func (p *Status) deprecatedPrune() {
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
// Do not prune a trusted peer, nor prune its peer data and unset its trusted status.
if peerData.ConnState == Disconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) && notTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
@@ -834,7 +814,7 @@ func (p *Status) PeersToPrune() []peer.ID {
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected &&
if peerData.ConnState == PeerConnected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
@@ -900,7 +880,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == Connected &&
if peerData.ConnState == PeerConnected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
@@ -1002,28 +982,24 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {
// this method assumes the store lock is acquired before executing.
func (p *Status) isfromBadIP(pid peer.ID) error {
func (p *Status) isfromBadIP(pid peer.ID) bool {
peerData, ok := p.store.PeerData(pid)
if !ok {
return nil
return false
}
if peerData.Address == nil {
return nil
return false
}
ip, err := manet.ToIP(peerData.Address)
if err != nil {
return errors.Wrap(err, "to ip")
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
return true
}
}
return nil
return false
}
func (p *Status) addIpToTracker(pid peer.ID) {

View File

@@ -215,7 +215,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
// Add some peers with different states
numPeers := 2
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.Connected)
addPeer(t, p, peers.PeerConnected)
}
expectedPeer := p.All()[1]
bitV := bitfield.NewBitvector64()
@@ -230,7 +230,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
}))
numPeers = 3
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.Disconnected)
addPeer(t, p, peers.PeerDisconnected)
}
ps := p.SubscribedToSubnet(2)
assert.Equal(t, 1, len(ps), "Unexpected num of peers")
@@ -259,7 +259,7 @@ func TestPeerImplicitAdd(t *testing.T) {
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)
connectionState := peers.Connecting
connectionState := peers.PeerConnecting
p.SetConnectionState(id, connectionState)
resConnectionState, err := p.ConnectionState(id)
@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
require.NoError(t, err)
}
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
}
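The assertion changes in this test follow directly from that signature change: with an error-returning IsBad the test distinguishes nil (good peer) from non-nil via assert.NoError and assert.NotNil, whereas the boolean version compares the result against the literals false and true with assert.Equal. The threshold at which the peer flips to bad (two bad responses, per the counts above) is unchanged.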
func TestAddMetaData(t *testing.T) {
@@ -393,7 +393,7 @@ func TestAddMetaData(t *testing.T) {
// Add some peers with different states
numPeers := 5
for i := 0; i < numPeers; i++ {
addPeer(t, p, peers.Connected)
addPeer(t, p, peers.PeerConnected)
}
newPeer := p.All()[2]
@@ -422,19 +422,19 @@ func TestPeerConnectionStatuses(t *testing.T) {
// Add some peers with different states
numPeersDisconnected := 11
for i := 0; i < numPeersDisconnected; i++ {
addPeer(t, p, peers.Disconnected)
addPeer(t, p, peers.PeerDisconnected)
}
numPeersConnecting := 7
for i := 0; i < numPeersConnecting; i++ {
addPeer(t, p, peers.Connecting)
addPeer(t, p, peers.PeerConnecting)
}
numPeersConnected := 43
for i := 0; i < numPeersConnected; i++ {
addPeer(t, p, peers.Connected)
addPeer(t, p, peers.PeerConnected)
}
numPeersDisconnecting := 4
for i := 0; i < numPeersDisconnecting; i++ {
addPeer(t, p, peers.Disconnecting)
addPeer(t, p, peers.PeerDisconnecting)
}
// Now confirm the states
@@ -463,7 +463,7 @@ func TestPeerValidTime(t *testing.T) {
numPeersConnected := 6
for i := 0; i < numPeersConnected; i++ {
addPeer(t, p, peers.Connected)
addPeer(t, p, peers.PeerConnected)
}
allPeers := p.All()
@@ -510,10 +510,10 @@ func TestPrune(t *testing.T) {
for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.Disconnected)
_ = addPeer(t, p, peers.PeerDisconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.Connected)
_ = addPeer(t, p, peers.PeerConnected)
}
disPeers := p.Disconnected()
@@ -571,23 +571,23 @@ func TestPeerIPTracker(t *testing.T) {
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
}
// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.Disconnected)
pid := addPeer(t, p, peers.PeerDisconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
for _, pr := range badPeers {
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
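This test leans on the bookkeeping that the addIpToTracker helper shown earlier maintains: each peer record increments a per-IP counter, pruning removes disconnected records, and an IP over the collocation limit becomes acceptable again once enough of its peers are trimmed. A self-contained sketch of that counter (a mock, not Prysm's tracker, with an illustrative limit):

package main

import "fmt"

// Illustrative stand-in for the package's CollocationLimit.
const CollocationLimit = 5

type ipTracker map[string]int

func (t ipTracker) add(ip string) { t[ip]++ }

func (t ipTracker) remove(ip string) {
	if t[ip] > 0 {
		t[ip]--
	}
}

func (t ipTracker) overLimit(ip string) bool { return t[ip] > CollocationLimit }

func main() {
	tr := ipTracker{}
	for i := 0; i < 10; i++ {
		tr.add("213.202.254.180") // ten colocated peer records
	}
	fmt.Println(tr.overLimit("213.202.254.180")) // true
	for i := 0; i < 6; i++ {
		tr.remove("213.202.254.180") // pruning trims disconnected records
	}
	fmt.Println(tr.overLimit("213.202.254.180")) // false: back under the limit
}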
@@ -601,11 +601,8 @@ func TestTrimmedOrderedPeers(t *testing.T) {
},
})
const (
expectedTarget = primitives.Epoch(2)
maxPeers = 3
)
expectedTarget := primitives.Epoch(2)
maxPeers := 3
var mockroot2 [32]byte
var mockroot3 [32]byte
var mockroot4 [32]byte
@@ -614,41 +611,36 @@ func TestTrimmedOrderedPeers(t *testing.T) {
copy(mockroot3[:], "three")
copy(mockroot4[:], "four")
copy(mockroot5[:], "five")
// Peer 1
pid1 := addPeer(t, p, peers.Connected)
pid1 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid1, &pb.Status{
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 3,
FinalizedRoot: mockroot3[:],
})
// Peer 2
pid2 := addPeer(t, p, peers.Connected)
pid2 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid2, &pb.Status{
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 4,
FinalizedRoot: mockroot4[:],
})
// Peer 3
pid3 := addPeer(t, p, peers.Connected)
pid3 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid3, &pb.Status{
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 5,
FinalizedRoot: mockroot5[:],
})
// Peer 4
pid4 := addPeer(t, p, peers.Connected)
pid4 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid4, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
})
// Peer 5
pid5 := addPeer(t, p, peers.Connected)
pid5 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid5, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
@@ -688,12 +680,12 @@ func TestAtInboundPeerLimit(t *testing.T) {
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
assert.Equal(t, false, p.IsAboveInboundLimit(), "Inbound limit exceeded")
for i := 0; i < 31; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
assert.Equal(t, true, p.IsAboveInboundLimit(), "Inbound limit not exceeded")
}
@@ -713,7 +705,7 @@ func TestPrunePeers(t *testing.T) {
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
@@ -721,7 +713,7 @@ func TestPrunePeers(t *testing.T) {
for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are the correct prunable peers.
@@ -731,7 +723,7 @@ func TestPrunePeers(t *testing.T) {
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Set up bad scores for inbound peers.
@@ -775,7 +767,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
@@ -783,7 +775,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are the correct prunable peers.
@@ -793,7 +785,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
var trustedPeers []peer.ID
@@ -829,7 +821,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
// Add more peers to check that trusted peers can be pruned after they are deleted from the trusted peer set.
for i := 0; i < 9; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED))
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Delete trusted peers.
@@ -873,14 +865,14 @@ func TestStatus_BestPeer(t *testing.T) {
headSlot primitives.Slot
finalizedEpoch primitives.Epoch
}
tests := []struct {
name string
peers []*peerConfig
limitPeers int
ourFinalizedEpoch primitives.Epoch
targetEpoch primitives.Epoch
targetEpochSupport int // Denotes how many peers support returned epoch.
name string
peers []*peerConfig
limitPeers int
ourFinalizedEpoch primitives.Epoch
targetEpoch primitives.Epoch
// targetEpochSupport denotes how many peers support returned epoch.
targetEpochSupport int
}{
{
name: "head slot matches finalized epoch",
@@ -893,7 +885,6 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 3 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -911,7 +902,6 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 4 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -926,7 +916,6 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 42 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 0,
targetEpoch: 4,
targetEpochSupport: 4,
},
@@ -941,8 +930,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 3, headSlot: 46 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 6, headSlot: 6 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 5,
limitPeers: 15,
targetEpoch: 6,
targetEpochSupport: 1,
},
@@ -961,8 +950,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 5,
limitPeers: 15,
targetEpoch: 6,
targetEpochSupport: 5,
},
@@ -981,8 +970,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 7, headSlot: 7 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 4,
ourFinalizedEpoch: 5,
limitPeers: 4,
targetEpoch: 6,
targetEpochSupport: 4,
},
@@ -997,8 +986,8 @@ func TestStatus_BestPeer(t *testing.T) {
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
{finalizedEpoch: 8, headSlot: 8 * params.BeaconConfig().SlotsPerEpoch},
},
limitPeers: 15,
ourFinalizedEpoch: 5,
limitPeers: 15,
targetEpoch: 8,
targetEpochSupport: 3,
},
@@ -1013,7 +1002,7 @@ func TestStatus_BestPeer(t *testing.T) {
},
})
for _, peerConfig := range tt.peers {
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
p.SetChainState(addPeer(t, p, peers.PeerConnected), &pb.Status{
FinalizedEpoch: peerConfig.finalizedEpoch,
HeadSlot: peerConfig.headSlot,
})
@@ -1039,7 +1028,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
for i := 0; i <= maxPeers+100; i++ {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
FinalizedEpoch: 10,
})
@@ -1062,7 +1051,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
peerSlots := []primitives.Slot{32, 32, 32, 32, 235, 233, 258, 268, 270}
for i, headSlot := range peerSlots {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetConnectionState(peer.ID(rune(i)), peers.PeerConnected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
HeadSlot: headSlot,
})
@@ -1085,17 +1074,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
},
})
// Peer 1
pid1 := addPeer(t, p, peers.Connected)
pid1 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid1, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
// Peer 2
pid2 := addPeer(t, p, peers.Connected)
pid2 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid2, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
})
// Peer 3
pid3 := addPeer(t, p, peers.Connected)
pid3 := addPeer(t, p, peers.PeerConnected)
p.SetChainState(pid3, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
@@ -1114,8 +1103,8 @@ func TestInbound(t *testing.T) {
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
createPeer(t, p, addr, network.DirOutbound, peers.Connected)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
result := p.Inbound()
require.Equal(t, 1, len(result))
@@ -1134,8 +1123,8 @@ func TestInboundConnected(t *testing.T) {
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.Connected)
createPeer(t, p, addr, network.DirInbound, peers.Connecting)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
result := p.InboundConnected()
require.Equal(t, 1, len(result))
@@ -1168,7 +1157,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
@@ -1177,7 +1166,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.Connected)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
@@ -1214,8 +1203,8 @@ func TestOutbound(t *testing.T) {
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
createPeer(t, p, addr, network.DirInbound, peers.Connected)
outbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
outbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
result := p.Outbound()
require.Equal(t, 1, len(result))
@@ -1234,8 +1223,8 @@ func TestOutboundConnected(t *testing.T) {
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.Connected)
createPeer(t, p, addr, network.DirOutbound, peers.Connecting)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
result := p.OutboundConnected()
require.Equal(t, 1, len(result))
@@ -1268,7 +1257,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
@@ -1277,7 +1266,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.Connected)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
@@ -1304,7 +1293,7 @@ func TestOutboundConnectedWithProtocol(t *testing.T) {
}
// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState) peer.ID {
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
// Set up some peers with different states
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
@@ -1323,7 +1312,7 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState) peer
}
func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
dir network.Direction, state peerdata.ConnectionState) peer.ID {
dir network.Direction, state peerdata.PeerConnectionState) peer.ID {
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
_, err := rand.Read(idBytes)

Some files were not shown because too many files have changed in this diff.