Compare commits


27 Commits

Author SHA1 Message Date
terence tsao
ae4442964a Merge branch 'single-att' of github.com:prysmaticlabs/prysm into devnet5 2024-12-27 09:05:05 -08:00
rkapka
0772e04c72 fix gossip issues 2024-12-27 18:03:18 +01:00
terence tsao
b8097164e9 Merge branch 'single-att' of github.com:prysmaticlabs/prysm into devnet5 2024-12-27 08:08:19 -08:00
terence tsao
38f8bc47d9 Merge branch 'blob-target-max-increase' of github.com:prysmaticlabs/prysm into devnet5 2024-12-27 08:07:38 -08:00
terence tsao
6efd0d52fc Add EIP-7691 blob throughput increase 2024-12-27 08:03:34 -08:00
Rupam Dey
9e7c1d6af6 Light client: add better error handling (#14749)
* add better error handling

* changelog
2024-12-26 20:46:29 +00:00
rkapka
ce39492b70 Merge branch '__develop' into single-att
# Conflicts:
#	CHANGELOG.md
#	beacon-chain/rpc/eth/beacon/handlers_pool.go
#	beacon-chain/rpc/prysm/v1alpha1/validator/attester.go
#	beacon-chain/rpc/prysm/v1alpha1/validator/server.go
#	beacon-chain/rpc/service.go
#	proto/prysm/v1alpha1/attestation.go
2024-12-26 11:08:40 +01:00
terence
6ce6b869e5 Implement consensus spec v1.5.0-alpha.10 (#14733)
* Use 16 bit random value

* enforce 0x02 credentials for consolidations

* Limit consolidating balance by validator effective balance

* Update max blob commitment size

* Fix next sync committee indices

* Bytes to little endian

* Handle proposer computations in between forks

* Fix config and tests

Fix tests

Fix tests

* Fix test stream events by properly set effective balance

Fix test stream events by properly set effective balance

* Preallocate buffers to avoid repeated allocations

* Potuz's feedback

* Fix change log
2024-12-23 17:32:41 +00:00
rkapka
023c99ddf4 try a different design 2024-12-23 18:12:06 +01:00
rkapka
abf7e6d8de review yet again 2024-12-23 11:41:34 +01:00
Charlton Liv
dbd53bd70d SA1019 fix: math/rand to crypto/rand (#14747)
* math/rand to crypto/rand

* Update CHANGELOG.md

* Update CHANGELOG.md

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-12-23 10:18:38 +00:00
Radosław Kapka
d04b361cc3 Redesign of the attestation pool (#14324)
* Rename existing AttestationCache

* Cache with simple Add

* fix import cycle

* logic for unaggregated, aggregated and block attestations

* some small fixes

* remove Seen

* finishing touches

* feature flag

* extract forkchoice atts to separate type

* gate new functionality behind feature flag

* revert test files

* preparing for review

* change Id to [32]byte

* Potuz's review

* Potuz's review pt 2

* Nishant's review

* keep flat list of atts

* fix ForkchoiceAttestations() function

* Tests for Add, GetAll, Count

* Tests for remaining functions

* use DeepEqual

* fix tests

* documentation

* changelog <3

* v2 handlers

* nil check for forkchoice atts

* guard against 0 bits set

* fix failing test

* Preston's review

* better godocs
2024-12-23 09:59:32 +00:00
rkapka
a9f3844479 more review 2024-12-18 09:57:34 +01:00
rkapka
d3387b3068 review 2024-12-13 13:30:13 -05:00
Radosław Kapka
cb58bd1761 Merge branch 'develop' into single-att 2024-12-11 16:12:26 -05:00
rkapka
b04ccbe71b changelog <3 2024-12-11 12:23:02 -05:00
rkapka
9d70083b8c tests 2024-12-10 16:59:53 -05:00
rkapka
12795fbe46 Merge branch '__develop' into single-att
# Conflicts:
#	proto/prysm/v1alpha1/electra.ssz.go
2024-12-10 14:25:28 -05:00
rkapka
67e92e0c90 nitpick 2024-11-29 16:07:14 +01:00
rkapka
fd296d971a don't check bits after Electra 2024-11-29 15:53:51 +01:00
rkapka
4215a95283 small updates 2024-11-27 19:05:57 +01:00
rkapka
a32321f220 Merge branch '__develop' into single-att 2024-11-27 18:48:12 +01:00
rkapka
c8ecac2ff6 broadcast the correct att depending on version 2024-11-26 17:19:33 +01:00
rkapka
df5338a6b6 broadcast 2024-11-25 19:44:19 +01:00
rkapka
e741580f40 validator 2024-11-22 20:12:19 +01:00
rkapka
76de5f4e73 Merge branch '__develop' into single-att 2024-11-22 17:43:39 +01:00
rkapka
f9232dfde4 definitions and gossip 2024-11-22 17:43:24 +01:00
143 changed files with 3060 additions and 1420 deletions


@@ -14,7 +14,9 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- `Finished building block`: Display error only if not nil.
- Added support to update target and max blob count to different values per hard fork config.
- Log before blob filesystem cache warm-up.
- Debug log when downscoring a peer for bad response reason.
- New design for the attestation pool. [PR](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add field param placeholder for Electra blob target and max to pass spec tests.
- Add EIP-7691: Blob throughput increase.
### Changed
@@ -23,6 +25,10 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Refactor RPC handlers subscriptions.
- Go deps upgrade, from `ioutil` to `io`
- Move successfully registered validator(s) on builder log to debug.
- Update some test files to use `crypto/rand` instead of `math/rand`
- Enforce Compound prefix (0x02) for target when processing pending consolidation request.
- Limit consolidating by validator's effective balance.
- Use 16-bit random value for proposer and sync committee selection filter.
### Deprecated
@@ -83,6 +89,7 @@ Notable features:
- Save light client updates and bootstraps in DB.
- Added more comprehensive tests for `BlockToLightClientHeader`. [PR](https://github.com/prysmaticlabs/prysm/pull/14699)
- Added light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736)
- Light client: Add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749)
### Changed
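
Two of the Added entries above (per-fork blob target/max and EIP-7691) amount to a blob schedule keyed by hard fork. A minimal editor's sketch of the idea, with hypothetical types (Prysm's actual config fields may differ) and the EIP-7691 numbers, Deneb's 3/6 target/max rising to 6/9 in Electra:

// Editor's sketch only; types and field names are hypothetical.
type blobSchedule struct {
	fork   string
	target uint64 // target blobs per block
	max    uint64 // max blobs per block
}

var blobSchedules = []blobSchedule{
	{fork: "deneb", target: 3, max: 6},
	{fork: "electra", target: 6, max: 9}, // EIP-7691 blob throughput increase
}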


@@ -227,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.9"
consensus_spec_version = "v1.5.0-alpha.10"
bls_test_version = "v0.1.1"
@@ -243,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-gHbvlnErUeJGWzW8/8JiVlk28JwmXSMhOzkynEIz+8g=",
integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-hQkQdpm5ng4miGYa5WsOKWa0q8WtZu99Oqbv9QtBeJM=",
integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-33sBsmApnJpcyYfR3olKaPB+WC1q00ZKNzHa2TczIxk=",
integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-GQulBKLc2khpql2K/MxV+NG/d2kAhLXl+gLnKIg7rt4=",
integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)


@@ -432,6 +432,32 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
}, nil
}
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeIndex")
}
ai, err := strconv.ParseUint(a.AttesterIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterIndex")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SingleAttestation{
CommitteeId: primitives.CommitteeIndex(ci),
AttesterIndex: primitives.ValidatorIndex(ai),
Data: data,
Signature: sig,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),


@@ -36,6 +36,13 @@ type AttestationElectra struct {
CommitteeBits string `json:"committee_bits"`
}
type SingleAttestation struct {
CommitteeIndex string `json:"committee_index"`
AttesterIndex string `json:"attester_index"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`
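
To make the new wire type concrete, here is a hedged sketch of decoding a SingleAttestation request body and converting it with ToConsensus from the previous section. The structs import path and the data/source/target JSON shape beyond the tags shown above are assumptions; the values are placeholders (a zeroed 96-byte signature passes only the length check).

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs" // assumed import path
)

func main() {
	zeroRoot := "0x" + strings.Repeat("00", 32)
	body := []byte(`{
		"committee_index": "3",
		"attester_index": "1021",
		"data": {
			"slot": "123",
			"index": "3",
			"beacon_block_root": "` + zeroRoot + `",
			"source": {"epoch": "3", "root": "` + zeroRoot + `"},
			"target": {"epoch": "4", "root": "` + zeroRoot + `"}
		},
		"signature": "0x` + strings.Repeat("00", 96) + `"
	}`)
	var att structs.SingleAttestation
	if err := json.Unmarshal(body, &att); err != nil {
		panic(err)
	}
	a, err := att.ToConsensus() // decimal strings and hex fields are validated here
	if err != nil {
		panic(err)
	}
	fmt.Println(a.CommitteeId, a.AttesterIndex) // 3 1021
}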


@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -404,13 +405,19 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
-			if helpers.IsAggregated(a) {
-				if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
+			if features.Get().EnableExperimentalAttestationPool {
+				if err = s.cfg.AttestationCache.Add(a); err != nil {
 					return err
 				}
 			} else {
-				if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
-					return err
+				if a.IsAggregated() {
+					if err = s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
+						return err
+					}
+				} else {
+					if err = s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
+						return err
+					}
 				}
 			}
saveOrphanedAttCount.Inc()


@@ -85,6 +85,14 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
}
}
// WithAttestationCache for attestation lifecycle after chain inclusion.
func WithAttestationCache(c *cache.AttestationCache) Option {
return func(s *Service) error {
s.cfg.AttestationCache = c
return nil
}
}
// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
return func(s *Service) error {
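
A hedged wiring sketch for the new option; the NewService constructor signature and the minimum viable option set are assumptions, not taken from this diff.

package main

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
)

// newChainService is an editor's sketch: it registers the experimental
// attestation cache when constructing the blockchain service.
func newChainService(ctx context.Context) (*blockchain.Service, error) {
	return blockchain.NewService(ctx,
		blockchain.WithAttestationCache(cache.NewAttestationCache()),
		// ... remaining service options omitted for brevity
	)
}

func main() {}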


@@ -378,7 +378,11 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
-		} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
+		} else if features.Get().EnableExperimentalAttestationPool {
+			if err = s.cfg.AttestationCache.Add(a); err != nil {
+				return err
+			}
+		} else if err = s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}
}
@@ -418,7 +422,11 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
atts := headBlock.Block().Body().Attestations()
for _, att := range atts {
-		if helpers.IsAggregated(att) {
+		if features.Get().EnableExperimentalAttestationPool {
+			if err := s.cfg.AttestationCache.DeleteCovered(att); err != nil {
+				return errors.Wrap(err, "could not delete attestation")
+			}
+		} else if att.IsAggregated() {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}


@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"strings"
"time"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
@@ -129,7 +130,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
attestedRoot := cfg.roblock.Block().ParentRoot()
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not get attested block")
log.WithError(err).Errorf("Saving light client update failed: Could not get attested block for root %#x", attestedRoot)
return
}
if attestedBlock == nil || attestedBlock.IsNil() {
@@ -138,7 +139,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
}
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not get attested state")
log.WithError(err).Errorf("Saving light client update failed: Could not get attested state for root %#x", attestedRoot)
return
}
if attestedState == nil || attestedState.IsNil() {
@@ -149,7 +150,11 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
finalizedRoot := attestedState.FinalizedCheckpoint().Root
finalizedBlock, err := s.getBlock(cfg.ctx, [32]byte(finalizedRoot))
if err != nil {
log.WithError(err).Error("Saving light client update failed: Could not get finalized block")
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping saving light client update: Finalized block is nil for root %#x", finalizedRoot)
} else {
log.WithError(err).Errorf("Saving light client update failed: Could not get finalized block for root %#x", finalizedRoot)
}
return
}
@@ -224,28 +229,30 @@ func (s *Service) processLightClientFinalityUpdate(
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return errors.Wrap(err, "could not get attested block")
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return errors.Wrap(err, "could not get attested state")
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
-	var finalizedBlock interfaces.ReadOnlySignedBeaconBlock
-	finalizedCheckPoint := attestedState.FinalizedCheckpoint()
-	if finalizedCheckPoint != nil {
-		finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root)
-		finalizedBlock, err = s.cfg.BeaconDB.Block(ctx, finalizedRoot)
-		if err != nil {
-			finalizedBlock = nil
-		}
-	}
+	finalizedCheckpoint := attestedState.FinalizedCheckpoint()
 	// Check if the finalized checkpoint has changed
-	if finalizedCheckPoint == nil || bytes.Equal(finalizedCheckPoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
+	if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
 		return nil
 	}
+	finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
+	finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
+	if err != nil {
+		if errors.Is(err, errBlockNotFoundInCacheOrDB) {
+			log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
+			return nil
+		}
+		return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
+	}
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState.Slot(),
@@ -272,11 +279,11 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return errors.Wrap(err, "could not get attested block")
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return errors.Wrap(err, "could not get attested state")
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
@@ -289,6 +296,10 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
)
if err != nil {
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
log.WithError(err).Debug("Skipping processing light client optimistic update")
return nil
}
return errors.Wrap(err, "could not create light client optimistic update")
}


@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -166,7 +167,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
-	atts := s.cfg.AttPool.ForkchoiceAttestations()
+	var atts []ethpb.Att
+	if features.Get().EnableExperimentalAttestationPool {
+		atts = s.cfg.AttestationCache.ForkchoiceAttestations()
+	} else {
+		atts = s.cfg.AttPool.ForkchoiceAttestations()
+	}
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
@@ -182,7 +189,11 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
continue
}
-		if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
+		if features.Get().EnableExperimentalAttestationPool {
+			if err := s.cfg.AttestationCache.DeleteForkchoiceAttestation(a); err != nil {
+				log.WithError(err).Error("Could not delete fork choice attestation in pool")
+			}
+		} else if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
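
The same flag-gated dual-path pattern recurs at every pool call site in this change. An editor's distillation, with stub types standing in for Prysm's real cache, pool, and feature flag:

package main

import "fmt"

type att struct{ slot uint64 }

type service struct {
	experimental bool  // features.Get().EnableExperimentalAttestationPool
	cacheAtts    []att // s.cfg.AttestationCache.ForkchoiceAttestations()
	poolAtts     []att // s.cfg.AttPool.ForkchoiceAttestations()
}

// forkchoiceAtts mirrors the branch above: read from the experimental cache
// when the flag is on, otherwise from the legacy pool.
func (s *service) forkchoiceAtts() []att {
	if s.experimental {
		return s.cacheAtts
	}
	return s.poolAtts
}

func main() {
	s := &service{experimental: true, cacheAtts: []att{{slot: 1}}}
	fmt.Println(len(s.forkchoiceAtts())) // 1
}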


@@ -75,6 +75,7 @@ type config struct {
DepositCache cache.DepositCache
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager


@@ -5,6 +5,7 @@ go_library(
srcs = [
"active_balance.go",
"active_balance_disabled.go", # keep
"attestation.go",
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
@@ -36,18 +37,21 @@ go_library(
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations/attmap:go_default_library",
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
@@ -66,6 +70,7 @@ go_test(
srcs = [
"active_balance_test.go",
"attestation_data_test.go",
"attestation_test.go",
"cache_test.go",
"checkpoint_state_test.go",
"committee_fuzz_test.go",
@@ -88,14 +93,17 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

beacon-chain/cache/attestation.go (new file, 275 lines)

@@ -0,0 +1,275 @@
package cache
import (
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
log "github.com/sirupsen/logrus"
)
type attGroup struct {
slot primitives.Slot
atts []ethpb.Att
}
// AttestationCache holds a map of attGroup items that group together all attestations for a single slot.
// When we add an attestation to the cache by calling Add, we either create a new group with this attestation
// (if this is the first attestation for some slot) or two things can happen:
//
// - If the attestation is unaggregated, we add its attestation bit to attestation bits of the first
// attestation in the group.
// - If the attestation is aggregated, we append it to the group. There should be no redundancy
// in the list because we ignore redundant aggregates in gossip.
//
// The first bullet point above means that we keep one aggregate attestation to which we keep appending bits
// as new single-bit attestations arrive. This means that at any point during seconds 0-4 of a slot
// we will have only one attestation for this slot in the cache.
//
// NOTE: This design in principle can result in worse aggregates since we lose the ability to aggregate some
// single bit attestations in case of overlaps with incoming aggregates.
//
// The cache also keeps forkchoice attestations in a separate struct. These attestations are used for
// forkchoice-related operations.
type AttestationCache struct {
atts map[attestation.Id]*attGroup
sync.RWMutex
forkchoiceAtts *attmap.Attestations
}
// NewAttestationCache creates a new cache instance.
func NewAttestationCache() *AttestationCache {
return &AttestationCache{
atts: make(map[attestation.Id]*attGroup),
forkchoiceAtts: attmap.New(),
}
}
// Add does one of two things:
//
// - For unaggregated attestations, it adds the attestation bit to attestation bits of the running aggregate,
// which is the first aggregate for the slot.
// - For aggregated attestations, it appends the attestation to the existing list of attestations for the slot.
func (c *AttestationCache) Add(att ethpb.Att) error {
if att.IsNil() {
log.Debug("Attempted to add a nil attestation to the attestation cache")
return nil
}
if len(att.GetAggregationBits().BitIndices()) == 0 {
log.Debug("Attempted to add an attestation with 0 bits set to the attestation cache")
return nil
}
c.Lock()
defer c.Unlock()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrapf(err, "could not create attestation ID")
}
group := c.atts[id]
if group == nil {
group = &attGroup{
slot: att.GetData().Slot,
atts: []ethpb.Att{att},
}
c.atts[id] = group
return nil
}
if att.IsAggregated() {
group.atts = append(group.atts, att.Clone())
return nil
}
// This should never happen because we return early for a new group.
if len(group.atts) == 0 {
log.Error("Attestation group contains no attestations, skipping insertion")
return nil
}
a := group.atts[0]
// Indexing is safe because we have guarded against 0 bits set.
bit := att.GetAggregationBits().BitIndices()[0]
if a.GetAggregationBits().BitAt(uint64(bit)) {
return nil
}
sig, err := aggregateSig(a, att)
if err != nil {
return errors.Wrapf(err, "could not aggregate signatures")
}
a.GetAggregationBits().SetBitAt(uint64(bit), true)
a.SetSignature(sig)
return nil
}
// GetAll returns all attestations in the cache, excluding forkchoice attestations.
func (c *AttestationCache) GetAll() []ethpb.Att {
c.RLock()
defer c.RUnlock()
var result []ethpb.Att
for _, group := range c.atts {
result = append(result, group.atts...)
}
return result
}
// Count returns the number of all attestations in the cache, excluding forkchoice attestations.
func (c *AttestationCache) Count() int {
c.RLock()
defer c.RUnlock()
count := 0
for _, group := range c.atts {
count += len(group.atts)
}
return count
}
// DeleteCovered removes all attestations whose attestation bits are a proper subset of the passed-in attestation.
func (c *AttestationCache) DeleteCovered(att ethpb.Att) error {
if att.IsNil() {
return nil
}
c.Lock()
defer c.Unlock()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrapf(err, "could not create attestation ID")
}
group := c.atts[id]
if group == nil {
return nil
}
idx := 0
for _, a := range group.atts {
if covered, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
return err
} else if !covered {
group.atts[idx] = a
idx++
}
}
group.atts = group.atts[:idx]
if len(group.atts) == 0 {
delete(c.atts, id)
}
return nil
}
// PruneBefore removes all attestations whose slot is earlier than the passed-in slot.
func (c *AttestationCache) PruneBefore(slot primitives.Slot) uint64 {
c.Lock()
defer c.Unlock()
var pruneCount int
for id, group := range c.atts {
if group.slot < slot {
pruneCount += len(group.atts)
delete(c.atts, id)
}
}
return uint64(pruneCount)
}
// AggregateIsRedundant checks whether all attestation bits of the passed-in aggregate
// are already included by any aggregate in the cache.
func (c *AttestationCache) AggregateIsRedundant(att ethpb.Att) (bool, error) {
if att.IsNil() {
return true, nil
}
c.RLock()
defer c.RUnlock()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return true, errors.Wrapf(err, "could not create attestation ID")
}
group := c.atts[id]
if group == nil {
return false, nil
}
for _, a := range group.atts {
if redundant, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return true, err
} else if redundant {
return true, nil
}
}
return false, nil
}
// SaveForkchoiceAttestations saves forkchoice attestations.
func (c *AttestationCache) SaveForkchoiceAttestations(att []ethpb.Att) error {
return c.forkchoiceAtts.SaveMany(att)
}
// ForkchoiceAttestations returns all forkchoice attestations.
func (c *AttestationCache) ForkchoiceAttestations() []ethpb.Att {
return c.forkchoiceAtts.GetAll()
}
// DeleteForkchoiceAttestation deletes a forkchoice attestation.
func (c *AttestationCache) DeleteForkchoiceAttestation(att ethpb.Att) error {
return c.forkchoiceAtts.Delete(att)
}
// GetBySlotAndCommitteeIndex returns all attestations in the cache that match the provided slot
// and committee index. Forkchoice attestations are not returned.
//
// NOTE: This function cannot be declared as a method on the AttestationCache because it is a generic function.
func GetBySlotAndCommitteeIndex[T ethpb.Att](c *AttestationCache, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []T {
c.RLock()
defer c.RUnlock()
var result []T
for _, group := range c.atts {
if len(group.atts) > 0 {
// We can safely compare the first attestation because all attestations in a group
// must have the same slot and committee index, since they are under the same key.
a, ok := group.atts[0].(T)
if ok && a.GetData().Slot == slot && a.CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
for _, a := range group.atts {
a, ok := a.(T)
if ok {
result = append(result, a)
}
}
}
}
}
return result
}
func aggregateSig(agg ethpb.Att, att ethpb.Att) ([]byte, error) {
aggSig, err := bls.SignatureFromBytesNoValidation(agg.GetSignature())
if err != nil {
return nil, err
}
attSig, err := bls.SignatureFromBytesNoValidation(att.GetSignature())
if err != nil {
return nil, err
}
return bls.AggregateSignatures([]bls.Signature{aggSig, attSig}).Marshal(), nil
}
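
A hedged usage sketch of the cache above, using only the constructor and methods from this file: unaggregated attestations sharing the same data collapse into one running aggregate, while aggregates are appended alongside it.

package cachedemo

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// fillCache is an editor's sketch: it feeds already-validated attestations
// through Add and returns the populated cache.
func fillCache(atts []ethpb.Att) (*cache.AttestationCache, error) {
	c := cache.NewAttestationCache()
	for _, a := range atts {
		if err := c.Add(a); err != nil {
			return nil, err
		}
	}
	return c, nil // c.Count() <= len(atts) whenever single bits were merged
}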


@@ -15,24 +15,24 @@ type AttestationConsensusData struct {
Source forkchoicetypes.Checkpoint
}
// AttestationCache stores cached results of AttestationData requests.
type AttestationCache struct {
// AttestationDataCache stores cached results of AttestationData requests.
type AttestationDataCache struct {
a *AttestationConsensusData
sync.RWMutex
}
// NewAttestationCache creates a new instance of AttestationCache.
func NewAttestationCache() *AttestationCache {
return &AttestationCache{}
// NewAttestationDataCache creates a new instance of AttestationDataCache.
func NewAttestationDataCache() *AttestationDataCache {
return &AttestationDataCache{}
}
// Get retrieves cached attestation data, recording a cache hit or miss. This method is lock free.
func (c *AttestationCache) Get() *AttestationConsensusData {
func (c *AttestationDataCache) Get() *AttestationConsensusData {
return c.a
}
// Put adds a response to the cache. This method is lock free.
func (c *AttestationCache) Put(a *AttestationConsensusData) error {
func (c *AttestationDataCache) Put(a *AttestationConsensusData) error {
if a == nil {
return errors.New("attestation cannot be nil")
}
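
For contrast with the new AttestationCache, the renamed data cache keeps its single-value, lock-free round trip. An editor's sketch using only the API above:

package cachedemo

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"

// roundTrip is an editor's sketch of the renamed cache's Get/Put cycle.
func roundTrip(d *cache.AttestationConsensusData) (*cache.AttestationConsensusData, error) {
	c := cache.NewAttestationDataCache()
	if err := c.Put(d); err != nil { // rejects only a nil value
		return nil, err
	}
	return c.Get(), nil // lock-free read of the last stored value
}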


@@ -9,7 +9,7 @@ import (
)
func TestAttestationCache_RoundTrip(t *testing.T) {
c := cache.NewAttestationCache()
c := cache.NewAttestationDataCache()
a := c.Get()
require.Nil(t, a)

beacon-chain/cache/attestation_test.go (new file, 353 lines)

@@ -0,0 +1,353 @@
package cache
import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestAdd(t *testing.T) {
k, err := blst.RandKey()
require.NoError(t, err)
sig := k.Sign([]byte{'X'})
t.Run("new ID", func(t *testing.T) {
t.Run("first ID ever", func(t *testing.T) {
c := NewAttestationCache()
ab := bitfield.NewBitlist(8)
ab.SetBitAt(0, true)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: ab,
Signature: sig.Marshal(),
}
id, err := attestation.NewId(att, attestation.Data)
require.NoError(t, err)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, group.atts[0], att)
})
t.Run("other ID exists", func(t *testing.T) {
c := NewAttestationCache()
ab := bitfield.NewBitlist(8)
ab.SetBitAt(0, true)
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: ab,
Signature: sig.Marshal(),
}
existingId, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[existingId] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: ab,
Signature: sig.Marshal(),
}
id, err := attestation.NewId(att, attestation.Data)
require.NoError(t, err)
require.NoError(t, c.Add(att))
require.Equal(t, 2, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, group.atts[0], att)
})
})
t.Run("aggregated", func(t *testing.T) {
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 2, len(group.atts))
assert.DeepEqual(t, group.atts[0], existingAtt)
assert.DeepEqual(t, group.atts[1], att)
})
t.Run("unaggregated - existing bit", func(t *testing.T) {
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
existingAtt.AggregationBits.SetBitAt(0, true)
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, []int{0}, group.atts[0].GetAggregationBits().BitIndices())
})
t.Run("unaggregated - new bit", func(t *testing.T) {
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
existingAtt.AggregationBits.SetBitAt(0, true)
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(1, true)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, []int{0, 1}, group.atts[0].GetAggregationBits().BitIndices())
})
}
func TestGetAll(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}, &ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}}}
assert.Equal(t, 3, len(c.GetAll()))
}
func TestCount(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}, &ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}}}
assert.Equal(t, 3, c.Count())
}
func TestDeleteCovered(t *testing.T) {
k, err := blst.RandKey()
require.NoError(t, err)
sig := k.Sign([]byte{'X'})
att1 := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att1.AggregationBits.SetBitAt(0, true)
att2 := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att2.AggregationBits.SetBitAt(1, true)
att2.AggregationBits.SetBitAt(2, true)
att3 := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att3.AggregationBits.SetBitAt(1, true)
att3.AggregationBits.SetBitAt(3, true)
att3.AggregationBits.SetBitAt(4, true)
c := NewAttestationCache()
id, err := attestation.NewId(att1, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: att1.Data.Slot, atts: []ethpb.Att{att1, att2, att3}}
t.Run("no matching group", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 456, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(2, true)
att.AggregationBits.SetBitAt(3, true)
att.AggregationBits.SetBitAt(4, true)
require.NoError(t, c.DeleteCovered(att))
assert.Equal(t, 3, len(c.atts[id].atts))
})
t.Run("covered atts deleted", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(3, true)
att.AggregationBits.SetBitAt(4, true)
require.NoError(t, c.DeleteCovered(att))
atts := c.atts[id].atts
require.Equal(t, 1, len(atts))
assert.DeepEqual(t, att2, atts[0])
})
t.Run("last att in group deleted", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(2, true)
att.AggregationBits.SetBitAt(3, true)
att.AggregationBits.SetBitAt(4, true)
require.NoError(t, c.DeleteCovered(att))
assert.Equal(t, 0, len(c.atts))
})
}
func TestPruneBefore(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{slot: 1, atts: []ethpb.Att{&ethpb.Attestation{}, &ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{slot: 3, atts: []ethpb.Att{&ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id3"))] = &attGroup{slot: 2, atts: []ethpb.Att{&ethpb.Attestation{}}}
count := c.PruneBefore(3)
require.Equal(t, 1, len(c.atts))
_, ok := c.atts[bytesutil.ToBytes32([]byte("id2"))]
assert.Equal(t, true, ok)
assert.Equal(t, uint64(3), count)
}
func TestAggregateIsRedundant(t *testing.T) {
k, err := blst.RandKey()
require.NoError(t, err)
sig := k.Sign([]byte{'X'})
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
existingAtt.AggregationBits.SetBitAt(0, true)
existingAtt.AggregationBits.SetBitAt(1, true)
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
t.Run("no matching group", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 456, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, false, redundant)
})
t.Run("redundant", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, true, redundant)
})
t.Run("not redundant", func(t *testing.T) {
t.Run("strictly better", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(2, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, false, redundant)
})
t.Run("overlapping and new bits", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(2, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, false, redundant)
})
})
}
func TestGetBySlotAndCommitteeIndex(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{slot: 1, atts: []ethpb.Att{&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}}, &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{slot: 2, atts: []ethpb.Att{&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 2}}}}
c.atts[bytesutil.ToBytes32([]byte("id3"))] = &attGroup{slot: 1, atts: []ethpb.Att{&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 2}}}}
// committeeIndex has to be small enough to fit in the bitvector
atts := GetBySlotAndCommitteeIndex[*ethpb.Attestation](c, 1, 1)
require.Equal(t, 2, len(atts))
assert.Equal(t, primitives.Slot(1), atts[0].Data.Slot)
assert.Equal(t, primitives.Slot(1), atts[1].Data.Slot)
assert.Equal(t, primitives.CommitteeIndex(1), atts[0].Data.CommitteeIndex)
assert.Equal(t, primitives.CommitteeIndex(1), atts[1].Data.CommitteeIndex)
}


@@ -2,6 +2,7 @@ package altair
import (
"context"
"encoding/binary"
goErrors "errors"
"fmt"
"time"
@@ -22,8 +23,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
-const maxRandomByte = uint64(1<<8 - 1)
var (
ErrTooLate = errors.New("sync message is too late")
)
@@ -91,19 +90,22 @@ func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCom
// """
// epoch = Epoch(get_current_epoch(state) + 1)
//
// MAX_RANDOM_BYTE = 2**8 - 1
// MAX_RANDOM_VALUE = 2**16 - 1 # [Modified in Electra]
// active_validator_indices = get_active_validator_indices(state, epoch)
// active_validator_count = uint64(len(active_validator_indices))
// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
// i = 0
// i = uint64(0)
// sync_committee_indices: List[ValidatorIndex] = []
// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
// candidate_index = active_validator_indices[shuffled_index]
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// # [Modified in Electra]
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
// effective_balance = state.validators[candidate_index].effective_balance
// # [Modified in Electra:EIP7251]
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
// if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
// sync_committee_indices.append(candidate_index)
// i += 1
// return sync_committee_indices
@@ -123,12 +125,11 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
cIndices := make([]primitives.ValidatorIndex, 0, syncCommitteeSize)
hashFunc := hash.CustomSHA256Hasher()
maxEB := cfg.MaxEffectiveBalanceElectra
if s.Version() < version.Electra {
maxEB = cfg.MaxEffectiveBalance
}
// Preallocate buffers to avoid repeated allocations
seedBuffer := make([]byte, len(seed)+8)
copy(seedBuffer, seed[:])
for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < params.BeaconConfig().SyncCommitteeSize; i++ {
for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < syncCommitteeSize; i++ {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -137,18 +138,30 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
if err != nil {
return nil, err
}
-			b := append(seed[:], bytesutil.Bytes8(uint64(i.Div(32)))...)
-			randomByte := hashFunc(b)[i%32]
 			cIndex := indices[sIndex]
 			v, err := s.ValidatorAtIndexReadOnly(cIndex)
 			if err != nil {
 				return nil, err
 			}
 			effectiveBal := v.EffectiveBalance()
-			if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
-				cIndices = append(cIndices, cIndex)
+			if s.Version() >= version.Electra {
+				// Use the preallocated seed buffer
+				binary.LittleEndian.PutUint64(seedBuffer[len(seed):], uint64(i/16))
+				randomByte := hashFunc(seedBuffer)
+				offset := (i % 16) * 2
+				randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8
+				if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
+					cIndices = append(cIndices, cIndex)
+				}
+			} else {
+				// Use the preallocated seed buffer
+				binary.LittleEndian.PutUint64(seedBuffer[len(seed):], uint64(i/32))
+				randomByte := hashFunc(seedBuffer)[i%32]
+				if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
+					cIndices = append(cIndices, cIndex)
+				}
}
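
Isolated from the loop above, the Electra randomness step looks like this (editor's sketch; crypto/sha256 stands in for Prysm's hash.CustomSHA256Hasher, which is also SHA-256):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// randomValue reproduces the sampling step above: one hash of
// seed ++ uint64(i/16) serves 16 iterations, each reading two bytes at
// offset (i%16)*2 and combining them little-endian into a 16-bit value.
func randomValue(seed [32]byte, i uint64) uint64 {
	buf := make([]byte, len(seed)+8)
	copy(buf, seed[:])
	binary.LittleEndian.PutUint64(buf[32:], i/16)
	h := sha256.Sum256(buf)
	offset := (i % 16) * 2
	return uint64(h[offset]) | uint64(h[offset+1])<<8 // 0..65535
}

func main() {
	var seed [32]byte
	fmt.Println(randomValue(seed, 5)) // i=5 reads bytes 10 and 11 of the first hash
}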


@@ -20,7 +20,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
vals := make([]*eth.Validator, num)
for i := range vals {
wd := make([]byte, 32)
wd[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
wd[31] = byte(i)
vals[i] = &eth.Validator{


@@ -37,8 +37,7 @@ import (
// break
//
// # Calculate the consolidated balance
// max_effective_balance = get_max_effective_balance(source_validator)
// source_effective_balance = min(state.balances[pending_consolidation.source_index], max_effective_balance)
// source_effective_balance = min(state.balances[pending_consolidation.source_index], source_validator.effective_balance)
//
// # Move active balance to target. Excess balance is withdrawable.
// decrease_balance(state, pending_consolidation.source_index, source_effective_balance)
@@ -78,7 +77,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
if err != nil {
return err
}
b := min(validatorBalance, helpers.ValidatorMaxEffectiveBalance(sourceValidator))
b := min(validatorBalance, sourceValidator.EffectiveBalance())
if err := helpers.DecreaseBalance(st, pc.SourceIndex, b); err != nil {
return err
@@ -141,8 +140,8 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// if not (has_correct_credential and is_correct_source_address):
// return
//
// # Verify that target has execution withdrawal credentials
// if not has_execution_withdrawal_credential(target_validator):
// # Verify that target has compounding withdrawal credentials
// if not has_compounding_withdrawal_credential(target_validator):
// return
//
// # Verify the source and the target are active
@@ -175,10 +174,6 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// source_index=source_index,
// target_index=target_index
// ))
//
// # Churn any target excess active balance of target and raise its max
// if has_eth1_withdrawal_credential(target_validator):
// switch_to_compounding_validator(state, target_index)
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
if len(reqs) == 0 || st == nil {
return nil
@@ -253,7 +248,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
}
// Target validator must have their withdrawal credentials set appropriately.
if !helpers.HasExecutionWithdrawalCredentials(tgtV) {
if !helpers.HasCompoundingWithdrawalCredential(tgtV) {
continue
}
@@ -298,13 +293,6 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
}
if helpers.HasETH1WithdrawalCredential(tgtV) {
if err := SwitchToCompoundingValidator(st, tgtIdx); err != nil {
log.WithError(err).Error("failed to switch to compounding validator")
continue
}
}
}
return nil


@@ -46,6 +46,7 @@ func TestProcessPendingConsolidations(t *testing.T) {
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
@@ -218,7 +219,7 @@ func TestProcessConsolidationRequests(t *testing.T) {
}
// Validator scenario setup. See comments in reqs section.
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[13].ExitEpoch = 10
@@ -246,7 +247,7 @@ func TestProcessConsolidationRequests(t *testing.T) {
SourcePubkey: []byte("val_5"),
TargetPubkey: []byte("val_6"),
},
// Target does not have their withdrawal credentials set appropriately.
// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
SourcePubkey: []byte("val_7"),


@@ -32,7 +32,7 @@ func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation.GetData().Target == nil {
return errors.New("attestation's target can't be nil")
}
if attestation.GetAggregationBits() == nil {
if !attestation.IsSingle() && attestation.GetAggregationBits() == nil {
return errors.New("attestation's bitfield can't be nil")
}
return nil
@@ -67,12 +67,6 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
-// IsAggregated returns true if the attestation is an aggregated attestation,
-// false otherwise.
-func IsAggregated(attestation ethpb.Att) bool {
-	return attestation.GetAggregationBits().Count() > 1
-}
// ComputeSubnetForAttestation returns the subnet for which the provided attestation will be broadcasted to.
// This differs from the spec definition by instead passing in the active validators indices in the attestation's
// given epoch.


@@ -308,6 +308,16 @@ func TestValidateNilAttestation(t *testing.T) {
},
errString: "",
},
{
name: "single attestation",
attestation: &ethpb.SingleAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{},
Source: &ethpb.Checkpoint{},
},
},
errString: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
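
The new test case above passes because a SingleAttestation carries one attester index instead of an aggregation bitfield, so the nil-bitfield check is skipped for it. A self-contained editor's sketch of the same check (package paths taken from the imports shown elsewhere in this diff):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	// Mirrors the "single attestation" test case: no bitfield is set,
	// yet validation succeeds after this change.
	att := &ethpb.SingleAttestation{
		Data: &ethpb.AttestationData{
			Source: &ethpb.Checkpoint{},
			Target: &ethpb.Checkpoint{},
		},
	}
	fmt.Println(helpers.ValidateNilAttestation(att)) // <nil>
}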


@@ -3,6 +3,7 @@ package helpers
import (
"bytes"
"context"
"encoding/binary"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -11,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -347,27 +349,33 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
// Spec pseudocode definition:
//
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_BYTE = 2**8 - 1
// i = uint64(0)
// total = uint64(len(indices))
// while True:
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte: #[Modified in Electra:EIP7251]
// return candidate_index
// i += 1
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_VALUE = 2**16 - 1 # [Modified in Electra]
// i = uint64(0)
// total = uint64(len(indices))
// while True:
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
// # [Modified in Electra]
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
// effective_balance = state.validators[candidate_index].effective_balance
// # [Modified in Electra:EIP7251]
// if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
// return candidate_index
// i += 1
func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
length := uint64(len(activeIndices))
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hash.CustomSHA256Hasher()
beaconConfig := params.BeaconConfig()
seedBuffer := make([]byte, len(seed)+8)
copy(seedBuffer, seed[:])
for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
@@ -378,21 +386,28 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
if uint64(candidateIndex) >= uint64(bState.NumValidators()) {
return 0, errors.New("active index out of range")
}
-		b := append(seed[:], bytesutil.Bytes8(i/32)...)
-		randomByte := hashFunc(b)[i%32]
 		v, err := bState.ValidatorAtIndexReadOnly(candidateIndex)
 		if err != nil {
 			return 0, err
 		}
 		effectiveBal := v.EffectiveBalance()
-		maxEB := params.BeaconConfig().MaxEffectiveBalance
 		if bState.Version() >= version.Electra {
-			maxEB = params.BeaconConfig().MaxEffectiveBalanceElectra
-		}
+			binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/16)
+			randomByte := hashFunc(seedBuffer)
+			offset := (i % 16) * 2
+			randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8
-		if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
-			return candidateIndex, nil
+			if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
+				return candidateIndex, nil
+			}
+		} else {
+			binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
+			randomByte := hashFunc(seedBuffer)[i%32]
+			if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
+				return candidateIndex, nil
+			}
}
}
}
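
For reference, a minimal standalone sketch of the Electra sampling step above, using the standard library's SHA-256 in place of Prysm's custom hasher; the helper name electraRandomValue is illustrative and not part of the codebase:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// electraRandomValue mirrors the spec: hash seed ++ uint_to_bytes(i // 16),
// then read the two bytes at offset (i % 16) * 2 as a little-endian 16-bit value.
func electraRandomValue(seed [32]byte, i uint64) uint64 {
	buf := make([]byte, len(seed)+8)
	copy(buf, seed[:])
	binary.LittleEndian.PutUint64(buf[len(seed):], i/16)
	h := sha256.Sum256(buf)
	offset := (i % 16) * 2
	return uint64(h[offset]) | uint64(h[offset+1])<<8
}

func main() {
	var seed [32]byte
	seed[0] = 0x42
	// One hash now covers 16 candidates (two bytes each) instead of 32 (one byte each).
	for i := uint64(0); i < 3; i++ {
		fmt.Printf("i=%d random_value=%d\n", i, electraRandomValue(seed, i))
	}
}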

View File

@@ -841,7 +841,6 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hash.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
@@ -860,7 +859,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
if v != nil {
effectiveBal = v.EffectiveBalance
}
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
if effectiveBal*fieldparams.MaxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}

View File

@@ -24,6 +24,8 @@ import (
"google.golang.org/protobuf/proto"
)
const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than required"
func NewLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
@@ -84,7 +86,12 @@ func NewLightClientUpdateFromBeaconState(
return nil, errors.Wrap(err, "could not get sync aggregate")
}
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
return nil, fmt.Errorf(
"%s (got %d, need %d)",
ErrNotEnoughSyncCommitteeBits,
syncAggregate.SyncCommitteeBits.Count(),
params.BeaconConfig().MinSyncCommitteeParticipants,
)
}
// assert state.slot == state.latest_block_header.slot
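
Exporting the message as a constant lets callers detect this condition without reproducing the formatted string. A minimal sketch under that assumption, with illustrative counts:

package main

import (
	"fmt"
	"strings"
)

// Mirrors the exported constant above.
const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than required"

func main() {
	// The update builders wrap the constant, so a substring match suffices.
	err := fmt.Errorf("%s (got %d, need %d)", ErrNotEnoughSyncCommitteeBits, 3, 512)
	if strings.Contains(err.Error(), ErrNotEnoughSyncCommitteeBits) {
		fmt.Println("not enough sync committee participants; skipping update")
	}
}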

View File

@@ -217,6 +217,7 @@ func TestSlashValidator_OK(t *testing.T) {
}
func TestSlashValidator_Electra(t *testing.T) {
helpers.ClearCache()
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)

View File

@@ -2,8 +2,10 @@ package kv
import (
"context"
"crypto/rand"
"encoding/binary"
"math/rand"
mathRand "math/rand"
"strconv"
"testing"
"time"
@@ -878,16 +880,16 @@ func validators(limit int) []*ethpb.Validator {
var vals []*ethpb.Validator
for i := 0; i < limit; i++ {
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
binary.LittleEndian.PutUint64(pubKey, rand.Uint64())
binary.LittleEndian.PutUint64(pubKey, mathRand.Uint64())
val := &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: bytesutil.ToBytes(rand.Uint64(), 32),
EffectiveBalance: rand.Uint64(),
WithdrawalCredentials: bytesutil.ToBytes(mathRand.Uint64(), 32),
EffectiveBalance: mathRand.Uint64(),
Slashed: i%2 != 0,
ActivationEligibilityEpoch: primitives.Epoch(rand.Uint64()),
ActivationEpoch: primitives.Epoch(rand.Uint64()),
ExitEpoch: primitives.Epoch(rand.Uint64()),
WithdrawableEpoch: primitives.Epoch(rand.Uint64()),
ActivationEligibilityEpoch: primitives.Epoch(mathRand.Uint64()),
ActivationEpoch: primitives.Epoch(mathRand.Uint64()),
ExitEpoch: primitives.Epoch(mathRand.Uint64()),
WithdrawableEpoch: primitives.Epoch(mathRand.Uint64()),
}
vals = append(vals, val)
}
@@ -913,8 +915,8 @@ func checkStateSaveTime(b *testing.B, saveCount int) {
allValidators := append(initialSetOfValidators, validatosToAddInTest...)
// shuffle validators.
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
mathRand.New(mathRand.NewSource(time.Now().UnixNano()))
mathRand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
require.NoError(b, st.SetValidators(allValidators))
require.NoError(b, db.SaveState(context.Background(), st, bytesutil.ToBytes32(key)))
@@ -959,8 +961,8 @@ func checkStateReadTime(b *testing.B, saveCount int) {
allValidators := append(initialSetOfValidators, validatosToAddInTest...)
// shuffle validators.
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
mathRand.New(mathRand.NewSource(time.Now().UnixNano()))
mathRand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
require.NoError(b, st.SetValidators(allValidators))
require.NoError(b, db.SaveState(context.Background(), st, bytesutil.ToBytes32(key)))
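
rand.Seed has been deprecated since Go 1.20, so the idiomatic replacement, as above, is a locally seeded *rand.Rand whose methods are called directly. A minimal self-contained sketch:

package main

import (
	"fmt"
	mathRand "math/rand"
	"time"
)

func main() {
	// Seed a local generator instead of the deprecated global rand.Seed.
	r := mathRand.New(mathRand.NewSource(time.Now().UnixNano()))
	vals := []int{1, 2, 3, 4, 5}
	r.Shuffle(len(vals), func(i, j int) { vals[i], vals[j] = vals[j], vals[i] })
	fmt.Println(vals)
}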

View File

@@ -136,30 +136,18 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
defer cancel()
result := &pb.PayloadStatus{}
switch payload.Proto().(type) {
switch payloadPb := payload.Proto().(type) {
case *pb.ExecutionPayload:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayload)
if !ok {
return nil, errors.New("execution data must be a Bellatrix or Capella execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payloadPb)
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadCapella:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadCapella)
if !ok {
return nil, errors.New("execution data must be a Capella execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV2, payloadPb)
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
if executionRequests == nil {
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {

View File

@@ -93,6 +93,7 @@ type BeaconNode struct {
stop chan struct{} // Channel to wait for termination notifications.
db db.Database
slasherDB db.SlasherDatabase
attestationCache *cache.AttestationCache
attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
@@ -144,6 +145,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
stateFeed: new(event.Feed),
blockFeed: new(event.Feed),
opFeed: new(event.Feed),
attestationCache: cache.NewAttestationCache(),
attestationPool: attestations.NewPool(),
exitPool: voluntaryexits.NewPool(),
slashingsPool: slashings.NewPool(),
@@ -704,6 +706,7 @@ func (b *BeaconNode) fetchBuilderService() *builder.Service {
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(b.ctx, &attestations.Config{
Cache: b.attestationCache,
Pool: b.attestationPool,
InitialSyncComplete: b.initialSyncComplete,
})
@@ -732,6 +735,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithDepositCache(b.depositCache),
blockchain.WithChainStartFetcher(web3Service),
blockchain.WithExecutionEngineCaller(web3Service),
blockchain.WithAttestationCache(b.attestationCache),
blockchain.WithAttestationPool(b.attestationPool),
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
@@ -816,6 +820,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithBlockNotifier(b),
regularsync.WithAttestationNotifier(b),
regularsync.WithOperationNotifier(b),
regularsync.WithAttestationCache(b.attestationCache),
regularsync.WithAttestationPool(b.attestationPool),
regularsync.WithExitPool(b.exitPool),
regularsync.WithSlashingPool(b.slashingsPool),
@@ -952,6 +957,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationCache: b.attestationCache,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,

View File

@@ -16,6 +16,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",

View File

@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["map.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap",
visibility = ["//visibility:public"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,89 @@
package attmap
import (
"sync"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
// Attestations is a thread-safe container for attestations, keyed by attestation ID.
type Attestations struct {
atts map[attestation.Id]ethpb.Att
sync.RWMutex
}
// New creates a new instance of the map.
func New() *Attestations {
return &Attestations{atts: make(map[attestation.Id]ethpb.Att)}
}
// Save stores an attestation in the map.
func (a *Attestations) Save(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
a.Lock()
defer a.Unlock()
a.atts[id] = att
return nil
}
// SaveMany stores multiple attestations in the map.
func (a *Attestations) SaveMany(atts []ethpb.Att) error {
for _, att := range atts {
if err := a.Save(att); err != nil {
return err
}
}
return nil
}
// GetAll retrieves all attestations that are in the map.
func (a *Attestations) GetAll() []ethpb.Att {
a.RLock()
defer a.RUnlock()
atts := make([]ethpb.Att, len(a.atts))
i := 0
for _, att := range a.atts {
atts[i] = att.Clone()
i++
}
return atts
}
// Delete removes an attestation from the map.
func (a *Attestations) Delete(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
a.Lock()
defer a.Unlock()
delete(a.atts, id)
return nil
}
// Count returns the number of attestations in the map.
func (a *Attestations) Count() int {
a.RLock()
defer a.RUnlock()
return len(a.atts)
}
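
The type above is a mutex-guarded map that clones on read so callers can't mutate shared state. The same pattern in isolation, with byte slices standing in for attestations (a sketch, not the package's API):

package main

import (
	"fmt"
	"sync"
)

// lockedMap guards a plain map with an embedded RWMutex, as attmap does.
type lockedMap struct {
	sync.RWMutex
	items map[string][]byte
}

func (m *lockedMap) save(id string, v []byte) {
	m.Lock()
	defer m.Unlock()
	m.items[id] = v
}

// getAll returns copies, mirroring attmap's att.Clone() on read.
func (m *lockedMap) getAll() [][]byte {
	m.RLock()
	defer m.RUnlock()
	out := make([][]byte, 0, len(m.items))
	for _, v := range m.items {
		cp := make([]byte, len(v))
		copy(cp, v)
		out = append(out, cp)
	}
	return out
}

func main() {
	m := &lockedMap{items: make(map[string][]byte)}
	m.save("a", []byte{1, 2})
	fmt.Println(len(m.getAll())) // 1
}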

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"aggregated.go",
"block.go",
"forkchoice.go",
"kv.go",
"seen_bits.go",
"unaggregated.go",
@@ -14,6 +13,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/operations/attestations/attmap:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -88,7 +88,7 @@ func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftO
log.Error("nil aggregated attestation")
continue
}
if helpers.IsAggregated(aggregated) {
if aggregated.IsAggregated() {
if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
@@ -122,7 +122,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
if !helpers.IsAggregated(att) {
if !att.IsAggregated() {
return errors.New("attestation is not aggregated")
}
has, err := c.HasAggregatedAttestation(att)
@@ -255,7 +255,7 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
if !helpers.IsAggregated(att) {
if !att.IsAggregated() {
return errors.New("attestation is not aggregated")
}

View File

@@ -1,74 +0,0 @@
package kv
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
// SaveForkchoiceAttestation saves a forkchoice attestation in cache.
func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
c.forkchoiceAtt[id] = att
return nil
}
// SaveForkchoiceAttestations saves a list of forkchoice attestations in cache.
func (c *AttCaches) SaveForkchoiceAttestations(atts []ethpb.Att) error {
for _, att := range atts {
if err := c.SaveForkchoiceAttestation(att); err != nil {
return err
}
}
return nil
}
// ForkchoiceAttestations returns the forkchoice attestations in cache.
func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
c.forkchoiceAttLock.RLock()
defer c.forkchoiceAttLock.RUnlock()
atts := make([]ethpb.Att, 0, len(c.forkchoiceAtt))
for _, att := range c.forkchoiceAtt {
atts = append(atts, att.Clone())
}
return atts
}
// DeleteForkchoiceAttestation deletes a forkchoice attestation in cache.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
delete(c.forkchoiceAtt, id)
return nil
}
// ForkchoiceAttestationCount returns the number of forkchoice attestation keys in the pool.
func (c *AttCaches) ForkchoiceAttestationCount() int {
c.forkchoiceAttLock.RLock()
defer c.forkchoiceAttLock.RUnlock()
return len(c.forkchoiceAtt)
}

View File

@@ -20,7 +20,7 @@ func TestKV_Forkchoice_CanSaveRetrieve(t *testing.T) {
atts := []ethpb.Att{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.SaveForkchoiceAttestation(att))
require.NoError(t, cache.saveForkchoiceAttestation(att))
}
returned := cache.ForkchoiceAttestations()
@@ -41,7 +41,7 @@ func TestKV_Forkchoice_CanDelete(t *testing.T) {
atts := []ethpb.Att{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.SaveForkchoiceAttestation(att))
require.NoError(t, cache.saveForkchoiceAttestation(att))
}
require.NoError(t, cache.DeleteForkchoiceAttestation(att1))
@@ -61,7 +61,7 @@ func TestKV_Forkchoice_CanCount(t *testing.T) {
atts := []*ethpb.Attestation{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.SaveForkchoiceAttestation(att))
require.NoError(t, cache.saveForkchoiceAttestation(att))
}
require.Equal(t, 3, cache.ForkchoiceAttestationCount())

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap"
"github.com/prysmaticlabs/prysm/v5/config/params"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
@@ -21,8 +22,7 @@ type AttCaches struct {
aggregatedAtt map[attestation.Id][]ethpb.Att
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[attestation.Id]ethpb.Att
forkchoiceAttLock sync.RWMutex
forkchoiceAtt map[attestation.Id]ethpb.Att
forkchoiceAtt *attmap.Attestations
blockAttLock sync.RWMutex
blockAtt map[attestation.Id][]ethpb.Att
seenAtt *cache.Cache
@@ -36,10 +36,35 @@ func NewAttCaches() *AttCaches {
pool := &AttCaches{
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
forkchoiceAtt: make(map[attestation.Id]ethpb.Att),
forkchoiceAtt: attmap.New(),
blockAtt: make(map[attestation.Id][]ethpb.Att),
seenAtt: c,
}
return pool
}
// saveForkchoiceAttestation saves a forkchoice attestation.
func (c *AttCaches) saveForkchoiceAttestation(att ethpb.Att) error {
return c.forkchoiceAtt.Save(att)
}
// SaveForkchoiceAttestations saves forkchoice attestations.
func (c *AttCaches) SaveForkchoiceAttestations(att []ethpb.Att) error {
return c.forkchoiceAtt.SaveMany(att)
}
// ForkchoiceAttestations returns all forkchoice attestations.
func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
return c.forkchoiceAtt.GetAll()
}
// DeleteForkchoiceAttestation deletes a forkchoice attestation.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
return c.forkchoiceAtt.Delete(att)
}
// ForkchoiceAttestationCount returns the number of forkchoice attestation keys.
func (c *AttCaches) ForkchoiceAttestationCount() int {
return c.forkchoiceAtt.Count()
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -17,7 +16,7 @@ func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
if helpers.IsAggregated(att) {
if att.IsAggregated() {
return errors.New("attestation is aggregated")
}
@@ -133,8 +132,7 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
if helpers.IsAggregated(att) {
if att.IsAggregated() {
return errors.New("attestation is aggregated")
}
@@ -162,7 +160,7 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
count := 0
for r, att := range c.unAggregatedAtt {
if att == nil || att.IsNil() || helpers.IsAggregated(att) {
if att == nil || att.IsNil() || att.IsAggregated() {
continue
}
if seen, err := c.hasSeenBit(att); err == nil && seen {

View File

@@ -30,6 +30,16 @@ var (
Name: "expired_block_atts_total",
Help: "The number of expired and deleted block attestations in the pool.",
})
attCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "attestations_in_pool_total",
Help: "The number of attestations in the pool.",
},
)
expiredAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_atts_total",
Help: "The number of expired and deleted attestations in the pool.",
})
batchForkChoiceAttsT1 = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "aggregate_attestations_t1",
@@ -50,3 +60,8 @@ func (s *Service) updateMetrics() {
aggregatedAttsCount.Set(float64(s.cfg.Pool.AggregatedAttestationCount()))
unaggregatedAttsCount.Set(float64(s.cfg.Pool.UnaggregatedAttestationCount()))
}
func (s *Service) updateMetricsExperimental(numExpired uint64) {
attCount.Set(float64(s.cfg.Cache.Count()))
expiredAtts.Add(float64(numExpired))
}

View File

@@ -37,7 +37,6 @@ type Pool interface {
BlockAttestations() []ethpb.Att
DeleteBlockAttestation(att ethpb.Att) error
// For attestations to be passed to fork choice.
SaveForkchoiceAttestation(att ethpb.Att) error
SaveForkchoiceAttestations(atts []ethpb.Att) error
ForkchoiceAttestations() []ethpb.Att
DeleteForkchoiceAttestation(att ethpb.Att) error

View File

@@ -61,11 +61,16 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "Operations.attestations.batchForkChoiceAtts")
defer span.End()
if err := s.cfg.Pool.AggregateUnaggregatedAttestations(ctx); err != nil {
return err
var atts []ethpb.Att
if features.Get().EnableExperimentalAttestationPool {
atts = append(s.cfg.Cache.GetAll(), s.cfg.Cache.ForkchoiceAttestations()...)
} else {
if err := s.cfg.Pool.AggregateUnaggregatedAttestations(ctx); err != nil {
return err
}
atts = append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)
}
atts := append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)
attsById := make(map[attestation.Id][]ethpb.Att, len(atts))
@@ -92,9 +97,11 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
}
}
for _, a := range s.cfg.Pool.BlockAttestations() {
if err := s.cfg.Pool.DeleteBlockAttestation(a); err != nil {
return err
if !features.Get().EnableExperimentalAttestationPool {
for _, a := range s.cfg.Pool.BlockAttestations() {
if err := s.cfg.Pool.DeleteBlockAttestation(a); err != nil {
return err
}
}
}

View File

@@ -9,8 +9,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// pruneAttsPool prunes attestations pool on every slot interval.
func (s *Service) pruneAttsPool() {
// pruneExpired prunes the attestation pool on every prune interval.
func (s *Service) pruneExpired() {
ticker := time.NewTicker(s.cfg.pruneInterval)
defer ticker.Stop()
for {
@@ -25,6 +25,27 @@ func (s *Service) pruneAttsPool() {
}
}
// pruneExpiredExperimental prunes attestations on every prune interval.
func (s *Service) pruneExpiredExperimental() {
ticker := time.NewTicker(s.cfg.pruneInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
expirySlot, err := s.expirySlot()
if err != nil {
log.WithError(err).Error("Could not get expiry slot")
continue
}
numExpired := s.cfg.Cache.PruneBefore(expirySlot)
s.updateMetricsExperimental(numExpired)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}
// This prunes expired attestations from the pool.
func (s *Service) pruneExpiredAtts() {
aggregatedAtts := s.cfg.Pool.AggregatedAttestations()
@@ -84,3 +105,17 @@ func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
currentTime := uint64(prysmTime.Now().Unix())
return currentTime >= expirationTime
}
// Attestations for a slot before the returned slot are considered expired.
func (s *Service) expirySlot() (primitives.Slot, error) {
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == 0 {
return 0, nil
}
if currEpoch < params.BeaconConfig().DenebForkEpoch {
// Safe to subtract because we exited early for epoch 0.
return currSlot - 31, nil
}
return slots.EpochStart(currEpoch - 1)
}
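
To make the cutoff concrete, a self-contained sketch assuming mainnet's 32 slots per epoch (the real code reads slot and epoch math from config):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed for illustration

// expirySlotSketch mirrors the rule above: pre-Deneb, attestations older than
// 31 slots expire; post-Deneb, anything before the previous epoch's start expires.
func expirySlotSketch(currSlot, denebForkEpoch uint64) uint64 {
	currEpoch := currSlot / slotsPerEpoch
	if currEpoch == 0 {
		return 0
	}
	if currEpoch < denebForkEpoch {
		return currSlot - 31
	}
	return (currEpoch - 1) * slotsPerEpoch
}

func main() {
	fmt.Println(expirySlotSketch(100, 1000)) // pre-Deneb: 69
	fmt.Println(expirySlotSketch(100, 0))    // post-Deneb: start of epoch 2 = 64
}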

View File

@@ -50,7 +50,7 @@ func TestPruneExpired_Ticker(t *testing.T) {
// Rewind back one epoch worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
go s.pruneAttsPool()
go s.pruneExpired()
done := make(chan struct{}, 1)
async.RunEvery(ctx, 500*time.Millisecond, func() {
@@ -145,5 +145,4 @@ func TestPruneExpired_ExpiredDeneb(t *testing.T) {
assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")
}

View File

@@ -9,7 +9,9 @@ import (
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
@@ -27,12 +29,13 @@ type Service struct {
// Config options for the service.
type Config struct {
Cache *cache.AttestationCache
Pool Pool
pruneInterval time.Duration
InitialSyncComplete chan struct{}
}
// NewService instantiates a new attestation pool service instance that will
// NewService instantiates a new attestation service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
cache := lruwrpr.New(forkChoiceProcessedAttsSize)
@@ -58,7 +61,12 @@ func (s *Service) Start() {
return
}
go s.prepareForkChoiceAtts()
go s.pruneAttsPool()
if features.Get().EnableExperimentalAttestationPool {
go s.pruneExpiredExperimental()
} else {
go s.pruneExpired()
}
}
// waitForSync waits until the beacon node is synced to the latest head.

View File

@@ -349,7 +349,7 @@ func (s *Service) listenForNewNodes() {
wg.Add(1)
go func(info *peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, *info); err != nil {
log.WithError(err).WithField("peerID", info.ID).Debug("Could not connect with peer")
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
wg.Done()
}(peerInfo)

View File

@@ -3,8 +3,9 @@ package p2p
import (
"context"
"crypto/ecdsa"
"crypto/rand"
"fmt"
"math/rand"
mathRand "math/rand"
"net"
"os"
"path"
@@ -48,7 +49,7 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
require.NoError(t, err, "Could not get ip")
ipAddr := net.ParseIP(ip)
temp := t.TempDir()
randNum := rand.Int()
randNum := mathRand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
require.NoError(t, os.Mkdir(tempPath, 0700))
pkey, err := privKey(&Config{DataDir: tempPath})

View File

@@ -47,7 +47,7 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
return gossipMessage(topic)
case AttestationSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.AttestationElectra{}
return &ethpb.SingleAttestation{}
}
return gossipMessage(topic)
case AttesterSlashingSubnetTopicFormat:
@@ -101,7 +101,7 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
// Specially handle Electra objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SingleAttestation{})] = AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
}
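
The mapping is fork-gated: the same attestation subnet topic decodes to a different message type once the Electra epoch is reached. A minimal sketch of that dispatch, with names and epochs chosen for illustration:

package main

import "fmt"

// attestationMessageType mirrors the switch above: attestation subnets carry
// SingleAttestation from the Electra fork epoch onward and Attestation before it.
func attestationMessageType(epoch, electraForkEpoch uint64) string {
	if epoch >= electraForkEpoch {
		return "SingleAttestation"
	}
	return "Attestation"
}

func main() {
	const electraForkEpoch = 10 // illustrative
	fmt.Println(attestationMessageType(9, electraForkEpoch))  // Attestation
	fmt.Println(attestationMessageType(10, electraForkEpoch)) // SingleAttestation
}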

View File

@@ -118,7 +118,7 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttestationElectra)
_, ok = pMessage.(*ethpb.SingleAttestation)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)

View File

@@ -214,11 +214,7 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
// Only log disconnections if we were fully connected.
if priorState == peers.Connected {
activePeersCount := len(s.peers.Active())
log.WithFields(logrus.Fields{
"remainingActivePeers": activePeersCount,
"direction": conn.Stat().Direction.String(),
"peerID": peerID,
}).Debug("Peer disconnected")
log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
}
}()
},

View File

@@ -101,22 +101,18 @@ func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {
// Increment increments the number of bad responses we have received from the given remote peer.
// If the peer doesn't exist, this method is a no-op.
func (s *BadResponsesScorer) Increment(pid peer.ID) int {
const defaultBadResponses = 1
func (s *BadResponsesScorer) Increment(pid peer.ID) {
s.store.Lock()
defer s.store.Unlock()
peerData, ok := s.store.PeerData(pid)
if !ok {
s.store.SetPeerData(pid, &peerdata.PeerData{
BadResponses: defaultBadResponses,
BadResponses: 1,
})
return defaultBadResponses
return
}
peerData.BadResponses++
return peerData.BadResponses
}
// IsBadPeer states if the peer is to be considered bad.

View File

@@ -73,6 +73,9 @@ const (
RPCBlocksByRootTopicV2 = protocolPrefix + BeaconBlocksByRootsMessageName + SchemaVersionV2
// RPCMetaDataTopicV2 defines the v2 topic for the metadata rpc method.
RPCMetaDataTopicV2 = protocolPrefix + MetadataMessageName + SchemaVersionV2
// RPCBlobSidecarsByRangeTopicV2 defines the v2 topic for the blob sidecars by range rpc method.
RPCBlobSidecarsByRangeTopicV2 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV2
// RPCBlobSidecarsByRootTopicV2 defines the v2 topic for the blob sidecars by root rpc method.
RPCBlobSidecarsByRootTopicV2 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV2
)
// RPC errors for topic parsing.

View File

@@ -443,7 +443,7 @@ func (s *Service) connectWithAllTrustedPeers(multiAddrs []multiaddr.Multiaddr) {
// make each dial non-blocking
go func(info peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, info); err != nil {
log.WithError(err).WithField("peerID", info.ID).Debug("Could not connect with trusted peer")
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
}(info)
}
@@ -459,7 +459,7 @@ func (s *Service) connectWithAllPeers(multiAddrs []multiaddr.Multiaddr) {
// make each dial non-blocking
go func(info peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, info); err != nil {
log.WithError(err).WithField("peerID", info.ID).Debug("Could not connect with peer")
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
}(info)
}
@@ -478,8 +478,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()
if err := s.host.Connect(ctx, info); err != nil {
score := s.Peers().Scorers().BadResponsesScorer().Increment(info.ID)
return errors.Wrapf(err, "connect to peer %s - new bad responses score: %d", info.ID, score)
s.Peers().Scorers().BadResponsesScorer().Increment(info.ID)
return err
}
return nil
}

View File

@@ -113,7 +113,7 @@ func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.
wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
log.WithError(err).WithField("peerID", info.ID).Debug("Could not connect with peer")
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
wg.Done()

View File

@@ -112,7 +112,7 @@ func InitializeDataMaps() {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
return &ethpb.AttestationElectra{}, nil
return &ethpb.SingleAttestation{}, nil
},
}

View File

@@ -21,7 +21,7 @@ type Service struct {
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationCache
AttestationCache *cache.AttestationDataCache
StateGen stategen.StateManager
P2P p2p.Broadcaster
ReplayerBuilder stategen.ReplayerBuilder

View File

@@ -205,6 +205,7 @@ func (s *Service) validatorEndpoints(
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationCache: s.cfg.AttestationCache,
AttestationsPool: s.cfg.AttestationsPool,
PeerManager: s.cfg.PeerManager,
Broadcaster: s.cfg.Broadcaster,
@@ -508,6 +509,7 @@ func (s *Service) beaconEndpoints(
server := &beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationCache: s.cfg.AttestationCache,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
@@ -530,6 +532,7 @@ func (s *Service) beaconEndpoints(
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
AttestationStateFetcher: s.cfg.AttestationReceiver,
}
const namespace = "beacon"

View File

@@ -17,6 +17,7 @@ go_library(
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",

View File

@@ -49,13 +49,18 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
return
}
attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
var attestations []eth.Att
if features.Get().EnableExperimentalAttestationPool {
attestations = s.AttestationCache.GetAll()
} else {
attestations = s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
}
attestations = append(attestations, unaggAtts...)
filteredAtts := make([]*structs.Attestation, 0, len(attestations))
for _, a := range attestations {
@@ -102,13 +107,19 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
if rawSlot == "" {
v = slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
}
attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
var attestations []eth.Att
if features.Get().EnableExperimentalAttestationPool {
attestations = s.AttestationCache.GetAll()
} else {
attestations = s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
}
attestations = append(attestations, unaggAtts...)
filteredAtts := make([]interface{}, 0, len(attestations))
for _, att := range attestations {
@@ -274,8 +285,11 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
}
}
func (s *Server) handleAttestationsElectra(ctx context.Context, data json.RawMessage) (attFailures []*server.IndexedVerificationFailure, failedBroadcasts []string, err error) {
var sourceAttestations []*structs.AttestationElectra
func (s *Server) handleAttestationsElectra(
ctx context.Context,
data json.RawMessage,
) (attFailures []*server.IndexedVerificationFailure, failedBroadcasts []string, err error) {
var sourceAttestations []*structs.SingleAttestation
if err = json.Unmarshal(data, &sourceAttestations); err != nil {
return nil, nil, errors.Wrap(err, "failed to unmarshal attestation")
@@ -285,7 +299,7 @@ func (s *Server) handleAttestationsElectra(ctx context.Context, data json.RawMes
return nil, nil, errors.New("no data submitted")
}
var validAttestations []*eth.AttestationElectra
var validAttestations []*eth.SingleAttestation
for i, sourceAtt := range sourceAttestations {
att, err := sourceAtt.ToConsensus()
if err != nil {
@@ -306,14 +320,23 @@ func (s *Server) handleAttestationsElectra(ctx context.Context, data json.RawMes
}
for i, att := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, att.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, att.Data.Slot, att.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note we can't send aggregated atts because we don't have the selection proof.
if !corehelpers.IsAggregated(att) {
if !att.IsAggregated() {
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
Attestation: att.ToAttestationElectra(committee),
},
})
}
@@ -324,24 +347,20 @@ func (s *Server) handleAttestationsElectra(ctx context.Context, data json.RawMes
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
continue
}
committeeIndex, err := att.GetCommitteeIndex()
if err != nil {
return nil, nil, errors.Wrap(err, "failed to retrieve attestation committee index")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committeeIndex, att.Data.Slot)
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
log.WithError(err).Errorf("could not broadcast attestation at index %d", i)
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
continue
}
if corehelpers.IsAggregated(att) {
if err = s.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save unaggregated attestation")
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("could not save attestation")
}
}
}
@@ -384,7 +403,7 @@ func (s *Server) handleAttestations(ctx context.Context, data json.RawMessage) (
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note we can't send aggregated atts because we don't have the selection proof.
if !corehelpers.IsAggregated(att) {
if !att.IsAggregated() {
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
@@ -407,7 +426,11 @@ func (s *Server) handleAttestations(ctx context.Context, data json.RawMessage) (
continue
}
if corehelpers.IsAggregated(att) {
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("could not save attestation")
}
} else if att.IsAggregated() {
if err = s.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
}

View File

@@ -498,13 +498,17 @@ func TestSubmitAttestations(t *testing.T) {
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
_, keys, err := util.DeterministicDepositsAndKeys(1)
_, keys, err := util.DeterministicDepositsAndKeys(2)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: keys[1].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
@@ -521,9 +525,10 @@ func TestSubmitAttestations(t *testing.T) {
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
AttestationStateFetcher: chainService,
}
t.Run("V1", func(t *testing.T) {
t.Run("single", func(t *testing.T) {
@@ -732,7 +737,7 @@ func TestSubmitAttestations(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 1, broadcaster.NumAttestations())
assert.Equal(t, "0x03", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetAggregationBits()))
assert.Equal(t, primitives.ValidatorIndex(1), broadcaster.BroadcastAttestations[0].GetAttestingIndex())
assert.Equal(t, "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetSignature()))
assert.Equal(t, primitives.Slot(0), broadcaster.BroadcastAttestations[0].GetData().Slot)
assert.Equal(t, primitives.CommitteeIndex(0), broadcaster.BroadcastAttestations[0].GetData().CommitteeIndex)
@@ -2344,8 +2349,8 @@ var (
]`
singleAttElectra = `[
{
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"committee_index": "0",
"attester_index": "1",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
@@ -2364,8 +2369,8 @@ var (
]`
multipleAttsElectra = `[
{
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"committee_index": "0",
"attester_index": "0",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
@@ -2382,8 +2387,8 @@ var (
}
},
{
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"committee_index": "0",
"attester_index": "1",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
@@ -2403,8 +2408,8 @@ var (
// signature is invalid
invalidAttElectra = `[
{
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"committee_index": "0",
"attester_index": "0",
"signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"data": {
"slot": "0",

View File

@@ -5,6 +5,7 @@ package beacon
import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
@@ -31,6 +32,7 @@ type Server struct {
BlockNotifier blockfeed.Notifier
OperationNotifier operation.Notifier
Broadcaster p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
SlashingsPool slashings.PoolManager
VoluntaryExitsPool voluntaryexits.PoolManager
@@ -48,4 +50,5 @@ type Server struct {
BLSChangesPool blstoexec.PoolManager
ForkchoiceFetcher blockchain.ForkchoiceFetcher
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
}

View File

@@ -190,7 +190,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 159, len(data))
assert.Equal(t, 160, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -533,6 +533,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "6", v)
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "9", v)
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
assert.Equal(t, "1152", v)
default:
t.Errorf("Incorrect key: %s", k)
}

View File

@@ -53,6 +53,7 @@ go_test(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",

View File

@@ -19,6 +19,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
@@ -460,7 +461,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
defer testSync.cleanup()
st := tc.getState()
v := &eth.Validator{ExitEpoch: math.MaxUint64}
v := &eth.Validator{ExitEpoch: math.MaxUint64, EffectiveBalance: params.BeaconConfig().MinActivationBalance}
require.NoError(t, st.SetValidators([]*eth.Validator{v}))
currentSlot := primitives.Slot(0)
// to avoid slot processing

View File

@@ -30,6 +30,7 @@ go_library(
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",

View File

@@ -25,6 +25,7 @@ import (
rpchelpers "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
@@ -129,13 +130,23 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
}
func (s *Server) aggregatedAttestation(w http.ResponseWriter, slot primitives.Slot, attDataRoot []byte, index primitives.CommitteeIndex) ethpbalpha.Att {
var match []ethpbalpha.Att
var err error
match, err := matchingAtts(s.AttestationsPool.AggregatedAttestations(), slot, attDataRoot, index)
if err != nil {
httputil.HandleError(w, "Could not get matching attestations: "+err.Error(), http.StatusInternalServerError)
return nil
if features.Get().EnableExperimentalAttestationPool {
match, err = matchingAtts(s.AttestationCache.GetAll(), slot, attDataRoot, index)
if err != nil {
httputil.HandleError(w, "Could not get matching attestations: "+err.Error(), http.StatusInternalServerError)
return nil
}
} else {
match, err = matchingAtts(s.AttestationsPool.AggregatedAttestations(), slot, attDataRoot, index)
if err != nil {
httputil.HandleError(w, "Could not get matching attestations: "+err.Error(), http.StatusInternalServerError)
return nil
}
}
if len(match) > 0 {
// If there are multiple matching aggregated attestations,
// then we return the one with the most aggregation bits.
@@ -145,6 +156,11 @@ func (s *Server) aggregatedAttestation(w http.ResponseWriter, slot primitives.Sl
return match[0]
}
// No match was found. The new pool doesn't store aggregated and unaggregated attestations separately, so there is nothing else to fall back to.
if features.Get().EnableExperimentalAttestationPool {
return nil
}
atts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
@@ -189,11 +205,7 @@ func matchingAtts(atts []ethpbalpha.Att, slot primitives.Slot, attDataRoot []byt
// compare the committee index separately.
if postElectra {
if att.Version() >= version.Electra {
ci, err := att.GetCommitteeIndex()
if err != nil {
return nil, err
}
if ci != index {
if att.GetCommitteeIndex() != index {
continue
}
} else {

View File

@@ -1194,7 +1194,7 @@ func TestGetAttestationData(t *testing.T) {
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
OptimisticModeFetcher: chain,
},
}
@@ -1275,7 +1275,7 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
GenesisTimeFetcher: chain,
HeadFetcher: chain,
FinalizedFetcher: chain,
@@ -1434,7 +1434,7 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
OptimisticModeFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,
@@ -1528,7 +1528,7 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
OptimisticModeFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,

View File

@@ -22,6 +22,7 @@ type Server struct {
HeadFetcher blockchain.HeadFetcher
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster

View File

@@ -7,11 +7,13 @@ import (
"strings"
"github.com/prysmaticlabs/prysm/v5/api/pagination"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -305,7 +307,14 @@ func (bs *Server) ListIndexedAttestationsElectra(
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolResponse, error) {
atts, err := attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
var atts []*ethpb.Attestation
var err error
if features.Get().EnableExperimentalAttestationPool {
atts, err = attestationsFromCache[*ethpb.Attestation](req.PageSize, bs.AttestationCache)
} else {
atts, err = attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
}
if err != nil {
return nil, err
}
@@ -332,10 +341,18 @@ func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolR
}
func (bs *Server) AttestationPoolElectra(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolElectraResponse, error) {
atts, err := attestationsFromPool[*ethpb.AttestationElectra](req.PageSize, bs.AttestationsPool)
var atts []*ethpb.AttestationElectra
var err error
if features.Get().EnableExperimentalAttestationPool {
atts, err = attestationsFromCache[*ethpb.AttestationElectra](req.PageSize, bs.AttestationCache)
} else {
atts, err = attestationsFromPool[*ethpb.AttestationElectra](req.PageSize, bs.AttestationsPool)
}
if err != nil {
return nil, err
}
// If there are no attestations, we simply return a response specifying this.
// Otherwise, attempting to paginate 0 attestations below would result in an error.
if len(atts) == 0 {
@@ -465,3 +482,25 @@ func attestationsFromPool[T ethpb.Att](pageSize int32, pool attestations.Pool) (
}
return atts, nil
}
func attestationsFromCache[T ethpb.Att](pageSize int32, c *cache.AttestationCache) ([]T, error) {
if int(pageSize) > cmd.Get().MaxRPCPageSize {
return nil, status.Errorf(
codes.InvalidArgument,
"Requested page size %d can not be greater than max size %d",
pageSize,
cmd.Get().MaxRPCPageSize,
)
}
cacheAtts := c.GetAll()
atts := make([]T, 0, len(cacheAtts))
for _, att := range cacheAtts {
a, ok := att.(T)
if !ok {
var expected T
return nil, status.Errorf(codes.Internal, "Attestation is of the wrong type (expected %T, got %T)", expected, att)
}
atts = append(atts, a)
}
return atts, nil
}
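
attestationsFromCache narrows a heterogeneous []ethpb.Att to a concrete type via a generic assertion. The same pattern in isolation (a sketch with stand-in types):

package main

import "fmt"

type att interface{ Version() int }

type phase0Att struct{}

func (phase0Att) Version() int { return 0 }

type electraAtt struct{}

func (electraAtt) Version() int { return 1 }

// castAll fails if any element is not already of the concrete type T,
// mirroring the ok-checked assertion in attestationsFromCache.
func castAll[T att](in []att) ([]T, error) {
	out := make([]T, 0, len(in))
	for _, a := range in {
		t, ok := a.(T)
		if !ok {
			var expected T
			return nil, fmt.Errorf("wrong type (expected %T, got %T)", expected, a)
		}
		out = append(out, t)
	}
	return out, nil
}

func main() {
	atts := []att{phase0Att{}, phase0Att{}}
	got, err := castAll[phase0Att](atts)
	fmt.Println(len(got), err) // 2 <nil>
	_, err = castAll[electraAtt](atts)
	fmt.Println(err) // wrong type (expected main.electraAtt, got main.phase0Att)
}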

View File

@@ -39,6 +39,7 @@ type Server struct {
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
Broadcaster p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
SlashingsPool slashings.PoolManager
ChainStartChan chan time.Time

View File

@@ -3,8 +3,10 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -27,14 +29,21 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
if err != nil {
return nil, err
}
atts := vs.AttPool.AggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
// Filter out the best aggregated attestation (ie. the one with the most aggregated bits).
if len(atts) == 0 {
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
var atts []*ethpb.Attestation
if features.Get().EnableExperimentalAttestationPool {
atts = cache.GetBySlotAndCommitteeIndex[*ethpb.Attestation](vs.AttestationCache, req.Slot, req.CommitteeIndex)
} else {
atts = vs.AttPool.AggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
if len(atts) == 0 {
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
}
}
if len(atts) == 0 {
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
}
best := bestAggregate(atts, req.CommitteeIndex, indexInCommittee)
attAndProof := &ethpb.AggregateAttestationAndProof{
Aggregate: best,
@@ -59,13 +68,21 @@ func (vs *Server) SubmitAggregateSelectionProofElectra(
if err != nil {
return nil, err
}
atts := vs.AttPool.AggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
if len(atts) == 0 {
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
var atts []*ethpb.AttestationElectra
if features.Get().EnableExperimentalAttestationPool {
atts = cache.GetBySlotAndCommitteeIndex[*ethpb.AttestationElectra](vs.AttestationCache, req.Slot, req.CommitteeIndex)
} else {
atts = vs.AttPool.AggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
if len(atts) == 0 {
return nil, status.Errorf(codes.NotFound, "No attestations found in pool")
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
}
}
if len(atts) == 0 {
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
}
best := bestAggregate(atts, req.CommitteeIndex, indexInCommittee)
attAndProof := &ethpb.AggregateAttestationAndProofElectra{
Aggregate: best,

View File

@@ -8,10 +8,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -44,46 +46,63 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
defer span.End()
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
resp, err := vs.proposeAtt(ctx, att, nil, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
go func() {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}()
} else {
go func() {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
return resp, nil
}
// ProposeAttestationElectra is a function called by an attester to vote
// on a block via an attestation object as defined in the Ethereum specification.
func (vs *Server) ProposeAttestationElectra(ctx context.Context, att *ethpb.AttestationElectra) (*ethpb.AttestResponse, error) {
func (vs *Server) ProposeAttestationElectra(ctx context.Context, att *ethpb.SingleAttestation) (*ethpb.AttestResponse, error) {
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()
committeeIndex, err := att.GetCommitteeIndex()
targetState, err := vs.AttestationStateFetcher.AttestationTargetState(ctx, att.Data.Target)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get target state")
}
committeeIndex := att.GetCommitteeIndex()
committee, err := helpers.BeaconCommitteeFromState(ctx, targetState, att.Data.Slot, committeeIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get committee")
}
resp, err := vs.proposeAtt(ctx, att, committee, committeeIndex)
if err != nil {
return nil, err
}
resp, err := vs.proposeAtt(ctx, att, committeeIndex)
if err != nil {
return nil, err
}
go func() {
ctx = trace.NewContext(context.Background(), trace.FromContext(ctx))
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}()
} else {
go func() {
ctx = trace.NewContext(context.Background(), trace.FromContext(ctx))
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
return resp, nil
}
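ToAttestationElectra expands the compact SingleAttestation into a full AttestationElectra using the committee fetched above. A rough sketch of what that conversion presumably does (the real method lives in the proto package and is not part of this hunk):

// Sketch: set the committee bit for the attestation's committee and the
// aggregation bit at the attester's position within that committee.
bits := bitfield.NewBitlist(uint64(len(committee)))
for i, valIdx := range committee {
	if valIdx == att.GetAttesterIndex() {
		bits.SetBitAt(uint64(i), true)
	}
}
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(uint64(att.GetCommitteeIndex()), true)
electraAtt := &ethpb.AttestationElectra{
	AggregationBits: bits,
	Data:            att.GetData(),
	CommitteeBits:   cb,
	Signature:       att.GetSignature(),
}

This is also why the handler now needs the target state: the committee layout determines where the single attester's bit lands.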
@@ -136,14 +155,29 @@ func (vs *Server) SubscribeCommitteeSubnets(ctx context.Context, req *ethpb.Comm
return &emptypb.Empty{}, nil
}
func (vs *Server) proposeAtt(ctx context.Context, att ethpb.Att, committee primitives.CommitteeIndex) (*ethpb.AttestResponse, error) {
func (vs *Server) proposeAtt(
ctx context.Context,
att ethpb.Att,
committee []primitives.ValidatorIndex, // required post-Electra
committeeIndex primitives.CommitteeIndex,
) (*ethpb.AttestResponse, error) {
if _, err := bls.SignatureFromBytes(att.GetSignature()); err != nil {
return nil, status.Error(codes.InvalidArgument, "Incorrect attestation signature")
}
root, err := att.GetData().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not tree hash attestation: %v", err)
return nil, status.Errorf(codes.Internal, "Could not get attestation root: %v", err)
}
var singleAtt *ethpb.SingleAttestation
if att.Version() >= version.Electra {
var ok bool
singleAtt, ok = att.(*ethpb.SingleAttestation)
if !ok {
return nil, status.Errorf(codes.Internal, "Attestation has wrong type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
}
att = singleAtt.ToAttestationElectra(committee)
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
@@ -161,10 +195,16 @@ func (vs *Server) proposeAtt(ctx context.Context, att ethpb.Att, committee primi
if err != nil {
return nil, err
}
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committee, att.GetData().Slot)
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committeeIndex, att.GetData().Slot)
// Broadcast the new attestation to the network.
if err := vs.P2P.BroadcastAttestation(ctx, subnet, att); err != nil {
var attToBroadcast ethpb.Att
if singleAtt != nil {
attToBroadcast = singleAtt
} else {
attToBroadcast = att
}
if err := vs.P2P.BroadcastAttestation(ctx, subnet, attToBroadcast); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast attestation: %v", err)
}
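ComputeSubnetFromCommitteeAndSlot implements the spec's compute_subnet_for_attestation. A sketch of the formula it computes, assuming the subnet count lives in the network config as it does elsewhere in Prysm:

// subnet = (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
committeesPerSlot := helpers.SlotCommitteeCount(uint64(len(vals)))
slotsSinceEpochStart := uint64(att.GetData().Slot % params.BeaconConfig().SlotsPerEpoch)
committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
subnet := (committeesSinceEpochStart + uint64(committeeIndex)) % params.BeaconNetworkConfig().AttestationSubnetCount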


@@ -65,7 +65,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:], State: beaconState},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},


@@ -31,11 +31,13 @@ import (
)
func TestProposeAttestation(t *testing.T) {
chainService := &mock.ChainService{}
attesterServer := &Server{
HeadFetcher: &mock.ChainService{},
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
HeadFetcher: chainService,
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
AttestationStateFetcher: chainService,
}
head := util.NewBeaconBlock()
head.Block.Slot = 999
@@ -79,81 +81,19 @@ func TestProposeAttestation(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
chainService.State = state
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
req := &ethpb.AttestationElectra{
req := &ethpb.SingleAttestation{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.NoError(t, err)
})
t.Run("Electra - non-zero committee index", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
CommitteeIndex: 1,
},
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "attestation data's committee index must be 0 but was 1", err)
})
t.Run("Electra - no committee bit set", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
CommitteeBits: primitives.NewAttestationCommitteeBits(),
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "exactly 1 committee index must be set but 0 were set", err)
})
t.Run("Electra - multiple committee bits set", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "exactly 1 committee index must be set but 2 were set", err)
})
}
func TestProposeAttestation_IncorrectSignature(t *testing.T) {
@@ -204,7 +144,7 @@ func TestGetAttestationData_OK(t *testing.T) {
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}
@@ -259,7 +199,7 @@ func BenchmarkGetAttestationDataConcurrent(b *testing.B) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
HeadFetcher: &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
@@ -313,7 +253,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
},
}
@@ -330,7 +270,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{}},
@@ -440,7 +380,7 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
HeadFetcher: &mock.ChainService{
TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState,
},
@@ -514,7 +454,7 @@ func TestGetAttestationData_CommitteeIndexIsZeroPostElectra(t *testing.T) {
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}


@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -30,21 +29,22 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
ctx, span := trace.StartSpan(ctx, "ProposerServer.packAttestations")
defer span.End()
atts := vs.AttPool.AggregatedAttestations()
atts, err := vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
var atts []ethpb.Att
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
if features.Get().EnableExperimentalAttestationPool {
atts = vs.AttestationCache.GetAll()
atts = vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
} else {
atts = vs.AttPool.AggregatedAttestations()
atts = vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
}
uAtts = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
atts = append(atts, uAtts...)
}
uAtts, err = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
atts = append(atts, uAtts...)
// Checking the state's version here will give the wrong result if the last slot of Deneb is missed.
// The head state will still be in Deneb while we are trying to build an Electra block.
@@ -65,6 +65,8 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
}
}
var err error
// Remove duplicates from both aggregated/unaggregated attestations. This
// prevents inefficient aggregates being created.
versionAtts, err = proposerAtts(versionAtts).dedup()
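dedup exists so that aggregates whose attester sets are already covered by a stronger aggregate with identical data are dropped before packing. A simplified sketch of the coverage test between two aggregation bitlists (the real implementation also groups attestations by data root):

// covered (sketch): every bit set in a is also set in b, so a adds nothing.
func covered(a, b bitfield.Bitlist) bool {
	if a.Len() != b.Len() {
		return false
	}
	for _, idx := range a.BitIndices() {
		if !b.BitAt(uint64(idx)) {
			return false
		}
	}
	return true
}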
@@ -455,15 +457,15 @@ func (a proposerAtts) dedup() (proposerAtts, error) {
}
// This filters the input attestations to return a list of valid attestations to be packaged inside a beacon block.
func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.BeaconState, atts []ethpb.Att) ([]ethpb.Att, error) {
func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.BeaconState, atts []ethpb.Att) []ethpb.Att {
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
return nil, err
log.WithError(err).Error("Could not delete invalid attestations")
}
return validAtts, nil
return validAtts
}
// The input attestations are processed and seen by the node, this deletes them from pool
@@ -476,13 +478,19 @@ func (vs *Server) deleteAttsInPool(ctx context.Context, atts []ethpb.Att) error
if ctx.Err() != nil {
return ctx.Err()
}
if helpers.IsAggregated(att) {
if err := vs.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.DeleteCovered(att); err != nil {
return errors.Wrap(err, "could not delete attestation")
}
} else {
if err := vs.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
if att.IsAggregated() {
if err := vs.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := vs.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
}
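DeleteCovered is the experimental cache's counterpart to the two typed deletes: presumably it drops every cached attestation whose attester bits are a subset of the attestation just packed into a block. A toy sketch over a flat slice, reusing the covered helper sketched earlier; real matching would compare full data roots, not just slot and committee:

kept := cached[:0]
for _, c := range cached {
	sameData := c.GetData().Slot == att.GetData().Slot &&
		c.GetData().CommitteeIndex == att.GetData().CommitteeIndex
	if sameData && covered(c.GetAggregationBits(), att.GetAggregationBits()) {
		continue // fully covered, no longer useful to proposers
	}
	kept = append(kept, c)
}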


@@ -2580,7 +2580,6 @@ func TestProposer_FilterAttestation(t *testing.T) {
tests := []struct {
name string
wantedErr string
inputAtts func() []ethpb.Att
expectedAtts func(inputAtts []ethpb.Att) []ethpb.Att
}{
@@ -2656,14 +2655,8 @@ func TestProposer_FilterAttestation(t *testing.T) {
HeadFetcher: &mock.ChainService{State: st, Root: genesisRoot[:]},
}
atts := tt.inputAtts()
received, err := proposerServer.validateAndDeleteAttsInPool(context.Background(), st, atts)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
assert.Equal(t, nil, received)
} else {
assert.NoError(t, err)
assert.DeepEqual(t, tt.expectedAtts(atts), received)
}
received := proposerServer.validateAndDeleteAttsInPool(context.Background(), st, atts)
assert.DeepEqual(t, tt.expectedAtts(atts), received)
})
}
}


@@ -42,42 +42,44 @@ import (
// and committees in which particular validators need to perform their responsibilities,
// and more.
type Server struct {
Ctx context.Context
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
ForkchoiceFetcher blockchain.ForkchoiceFetcher
GenesisFetcher blockchain.GenesisFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
BlockFetcher execution.POWBlockFetcher
DepositFetcher cache.DepositFetcher
ChainStartFetcher execution.ChainStartFetcher
Eth1InfoFetcher execution.ChainInfoFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncChecker sync.Checker
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
P2P p2p.Broadcaster
AttPool attestations.Pool
SlashingsPool slashings.PoolManager
ExitPool voluntaryexits.PoolManager
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
StateGen stategen.StateManager
ReplayerBuilder stategen.ReplayerBuilder
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
ClockWaiter startup.ClockWaiter
CoreService *core.Service
Ctx context.Context
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
ForkchoiceFetcher blockchain.ForkchoiceFetcher
GenesisFetcher blockchain.GenesisFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
BlockFetcher execution.POWBlockFetcher
DepositFetcher cache.DepositFetcher
ChainStartFetcher execution.ChainStartFetcher
Eth1InfoFetcher execution.ChainInfoFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncChecker sync.Checker
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
P2P p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
SlashingsPool slashings.PoolManager
ExitPool voluntaryexits.PoolManager
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
StateGen stategen.StateManager
ReplayerBuilder stategen.ReplayerBuilder
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
ClockWaiter startup.ClockWaiter
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current


@@ -95,6 +95,7 @@ type Config struct {
GenesisFetcher blockchain.GenesisFetcher
MockEth1Votes bool
EnableDebugRPCEndpoints bool
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingsPool slashings.PoolManager
@@ -203,7 +204,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
Broadcaster: s.cfg.Broadcaster,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OperationNotifier: s.cfg.OperationNotifier,
AttestationCache: cache.NewAttestationCache(),
AttestationCache: cache.NewAttestationDataCache(),
StateGen: s.cfg.StateGen,
P2P: s.cfg.Broadcaster,
FinalizedFetcher: s.cfg.FinalizationFetcher,
@@ -211,42 +212,44 @@ func NewService(ctx context.Context, cfg *Config) *Service {
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
GenesisFetcher: s.cfg.GenesisFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
BlockFetcher: s.cfg.ExecutionChainService,
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
Eth1InfoFetcher: s.cfg.ExecutionChainService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncChecker: s.cfg.SyncService,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
CoreService: coreService,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
Ctx: s.ctx,
AttestationCache: s.cfg.AttestationCache,
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
ForkFetcher: s.cfg.ForkFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
GenesisFetcher: s.cfg.GenesisFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
BlockFetcher: s.cfg.ExecutionChainService,
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
Eth1InfoFetcher: s.cfg.ExecutionChainService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncChecker: s.cfg.SyncService,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
CoreService: coreService,
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
AttestationStateFetcher: s.cfg.AttestationReceiver,
}
s.validatorServer = validatorServer
nodeServer := &nodev1alpha1.Server{
@@ -266,6 +269,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
beaconChainServer := &beaconv1alpha1.Server{
Ctx: s.ctx,
BeaconDB: s.cfg.BeaconDB,
AttestationCache: s.cfg.AttestationCache,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,


@@ -44,7 +44,6 @@ go_library(
"validate_aggregate_proof.go",
"validate_attester_slashing.go",
"validate_beacon_attestation.go",
"validate_beacon_attestation_electra.go",
"validate_beacon_blocks.go",
"validate_blob.go",
"validate_bls_to_execution_change.go",
@@ -179,7 +178,6 @@ go_test(
"sync_test.go",
"validate_aggregate_proof_test.go",
"validate_attester_slashing_test.go",
"validate_beacon_attestation_electra_test.go",
"validate_beacon_attestation_test.go",
"validate_beacon_blocks_test.go",
"validate_blob_test.go",


@@ -208,8 +208,8 @@ func (s *Service) importBatches(ctx context.Context) {
}
_, err := s.batchImporter(ctx, current, ib, s.store)
if err != nil {
score := s.p2p.Peers().Scorers().BadResponsesScorer().Increment(ib.blockPid)
log.WithError(err).WithFields(ib.logFields()).WithField("newBlockPidBadResponsesScore", score).Debug("Backfill batch failed to import")
log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import")
s.downscore(ib)
s.batchSeq.update(ib.withState(batchErrRetryable))
// If a batch fails, the subsequent batches are no longer considered importable.
break
@@ -330,6 +330,10 @@ func (s *Service) initBatches() error {
return nil
}
func (s *Service) downscore(b batch) {
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(b.blockPid)
}
func (*Service) Stop() error {
return nil
}


@@ -62,7 +62,7 @@ func (s *Service) validateWithBatchVerifier(ctx context.Context, message string,
// If verification fails we fallback to individual verification
// of each signature set.
if resErr != nil {
log.WithError(resErr).Debugf("Could not perform batch verification of %s", message)
log.WithError(resErr).Tracef("Could not perform batch verification of %s", message)
verified, err := set.Verify()
if err != nil {
verErr := errors.Wrapf(err, "Could not verify %s", message)


@@ -220,8 +220,12 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRootRateElectra := params.BeaconConfig().MaxRequestBlobSidecarsElectra * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRangeRateElectra := params.BeaconConfig().MaxRequestBlobSidecarsElectra * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV1, leakybucket.NewCollector(0.000001, int64(byRootRate), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV1, leakybucket.NewCollector(0.000001, int64(byRangeRate), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV2, leakybucket.NewCollector(0.000001, int64(byRootRateElectra), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV2, leakybucket.NewCollector(0.000001, int64(byRangeRateElectra), time.Second, false))
return s, sidecars, cleanup
}


@@ -274,7 +274,7 @@ func TestExtractDataType(t *testing.T) {
return wsb
}(),
wantMd: wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{}),
wantAtt: &ethpb.AttestationElectra{},
wantAtt: &ethpb.SingleAttestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProofElectra{},
wantErr: false,
},


@@ -188,6 +188,50 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
},
},
{
name: "electra fork in the next epoch",
svcCreator: func(t *testing.T) *Service {
peer2peer := p2ptest.NewTestP2P(t)
gt := time.Now().Add(-4 * oneEpoch())
vr := [32]byte{'A'}
chainService := &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
}
bCfg := params.BeaconConfig().Copy()
bCfg.ElectraForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithCancel(context.Background())
r := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{
p2p: peer2peer,
chain: chainService,
clock: startup.NewClock(gt, vr),
initialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
return r
},
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
rpcMap[string(p)] = true
}
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {


@@ -337,10 +337,8 @@ func (q *blocksQueue) onDataReceivedEvent(ctx context.Context) eventHandlerFn {
}
if errors.Is(response.err, beaconsync.ErrInvalidFetchedData) {
// Peer returned invalid data, penalize.
score := q.blocksFetcher.p2p.Peers().Scorers().BadResponsesScorer().Increment(m.pid)
log.
WithFields(logrus.Fields{"pid": response.pid, "newBadResponsesScore": score}).
Debug("Peer is penalized for invalid blocks")
q.blocksFetcher.p2p.Peers().Scorers().BadResponsesScorer().Increment(m.pid)
log.WithField("pid", response.pid).Debug("Peer is penalized for invalid blocks")
}
return m.state, response.err
}


@@ -2,6 +2,7 @@ package sync
import (
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -43,6 +44,13 @@ func WithDatabase(db db.NoHeadAccessDatabase) Option {
}
}
func WithAttestationCache(c *cache.AttestationCache) Option {
return func(s *Service) error {
s.cfg.attestationCache = c
return nil
}
}
func WithAttestationPool(attPool attestations.Pool) Option {
return func(s *Service) error {
s.cfg.attPool = attPool


@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/async"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
@@ -94,7 +95,7 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
data := aggregate.GetData()
// The pending attestations can arrive in both aggregated and unaggregated forms,
// each form has distinct validation steps.
if helpers.IsAggregated(aggregate) {
if aggregate.IsAggregated() {
// Save the pending aggregated attestation to the pool if it passes the aggregated
// validation steps.
valRes, err := s.validateAggregatedAtt(ctx, signedAtt)
@@ -103,10 +104,18 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
}
aggValid := pubsub.ValidationAccept == valRes
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
continue
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.attestationCache.Add(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
continue
}
} else {
if err := s.cfg.attPool.SaveAggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save aggregate attestation")
continue
}
}
s.setAggregatorIndexEpochSeen(data.Target.Epoch, signedAtt.AggregateAttestationAndProof().GetAggregatorIndex())
// Broadcasting the signed attestation again once a node is able to process it.
@@ -131,16 +140,38 @@ func (s *Service) processAttestations(ctx context.Context, attestations []ethpb.
log.WithError(err).Debug("Could not retrieve attestation prestate")
continue
}
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, preState)
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, aggregate.GetData().Slot, aggregate.GetCommitteeIndex())
if err != nil {
log.WithError(err).Debug("Could not retrieve committee from state")
continue
}
attesterIndex := primitives.ValidatorIndex(0)
if aggregate.Version() >= version.Electra {
var ok bool
singleAtt, ok := aggregate.(*ethpb.SingleAttestation)
if !ok {
log.Debugf("Attestation has wrong type (expected %T, got %T)", &ethpb.SingleAttestation{}, aggregate)
continue
}
attesterIndex = singleAtt.GetAttesterIndex()
aggregate = singleAtt.ToAttestationElectra(committee)
}
valid, err := s.validateUnaggregatedAttWithState(ctx, aggregate, attesterIndex, preState, committee)
if err != nil {
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
continue
}
if valid == pubsub.ValidationAccept {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
continue
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.attestationCache.Add(aggregate); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
continue
}
} else {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(aggregate); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
continue
}
}
s.setSeenCommitteeIndicesSlot(data.Slot, data.CommitteeIndex, aggregate.GetAggregationBits())


@@ -79,6 +79,8 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
topicMap[addEncoding(p2p.RPCBlobSidecarsByRootTopicV1)] = blobCollector
// BlobSidecarsByRangeV1
topicMap[addEncoding(p2p.RPCBlobSidecarsByRangeTopicV1)] = blobCollector
topicMap[addEncoding(p2p.RPCBlobSidecarsByRootTopicV2)] = blobCollector
topicMap[addEncoding(p2p.RPCBlobSidecarsByRangeTopicV2)] = blobCollector
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
@@ -112,8 +114,9 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
amt = 1
}
if amt > uint64(remaining) {
score := l.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
return errors.Wrapf(p2ptypes.ErrRateLimited, "new bad responses score: %d", score)
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
return p2ptypes.ErrRateLimited
}
return nil
}
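The collectors registered above are per-peer leaky buckets. A minimal sketch of the gating pattern, assuming the forked leakybucket API Prysm vendors (NewCollector(leakRate, capacity, period, deleteEmptyBuckets) with Remaining and Add keyed by peer ID):

collector := leakybucket.NewCollector(0.000001, 64, time.Second, false /* deleteEmptyBuckets */)
key := stream.Conn().RemotePeer().String()
if amt > uint64(collector.Remaining(key)) {
	// over capacity: descore and reject, mirroring validateRequest above
	return p2ptypes.ErrRateLimited
}
collector.Add(key, int64(amt))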
@@ -134,9 +137,9 @@ func (l *limiter) validateRawRpcRequest(stream network.Stream) error {
// Treat each request as a minimum of 1.
amt := int64(1)
if amt > remaining {
score := l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
return errors.Wrapf(p2ptypes.ErrRateLimited, "new bad responses score: %d", score)
return p2ptypes.ErrRateLimited
}
return nil
}


@@ -18,7 +18,7 @@ import (
func TestNewRateLimiter(t *testing.T) {
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
assert.Equal(t, len(rlimiter.limiterMap), 12, "correct number of topics not registered")
assert.Equal(t, len(rlimiter.limiterMap), 14, "correct number of topics not registered")
}
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {


@@ -9,7 +9,6 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
@@ -20,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
var (
@@ -67,7 +65,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
// Deneb: https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/p2p-interface.md#messages
// Electra: https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/p2p-interface.md#messages
case version.Deneb, version.Electra:
case version.Deneb:
return map[string]rpcHandler{
p2p.RPCStatusTopicV1: s.statusRPCHandler,
p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
@@ -75,10 +73,14 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler,
p2p.RPCPingTopicV1: s.pingHandler,
p2p.RPCMetaDataTopicV2: s.metaDataHandler,
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler, // Added in Deneb
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Added in Deneb
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler,
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler,
}, nil
case version.Electra:
return map[string]rpcHandler{
p2p.RPCBlobSidecarsByRootTopicV2: s.blobSidecarByRootRPCHandler,
p2p.RPCBlobSidecarsByRangeTopicV2: s.blobSidecarsByRangeRPCHandler,
}, nil
default:
return nil, errors.Errorf("RPC handler not found for fork index %d", forkIndex)
}
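Returning only the newly added blob topics for the Electra case suggests the registration path accumulates per-fork handler maps rather than taking the latest map alone. A sketch of that accumulation under this assumption, not code from the PR:

handlers := make(map[string]rpcHandler)
for idx := 0; idx <= currentForkIndex; idx++ {
	m, err := s.rpcHandlerByTopicFromFork(idx)
	if err != nil {
		return err
	}
	for topic, h := range m {
		handlers[topic] = h // later forks add to or override earlier ones
	}
}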
@@ -254,9 +256,9 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
return
}
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
logStreamErrors(err, topic)
tracing.AnnotateError(span, err)
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
logStreamErrors(err, topic, remotePeer, score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return
}
if err := handle(ctx, msg, stream); err != nil {
@@ -274,9 +276,9 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
return
}
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
logStreamErrors(err, topic, remotePeer, score)
logStreamErrors(err, topic)
tracing.AnnotateError(span, err)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return
}
if err := handle(ctx, nTyp.Elem().Interface(), stream); err != nil {
@@ -290,20 +292,13 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
})
}
func logStreamErrors(err error, topic string, remotePeer peer.ID, badResponsesScore int) {
log := log.WithFields(logrus.Fields{
"topic": topic,
"peer": remotePeer.String(),
"newBadResponsesScore": badResponsesScore,
})
func logStreamErrors(err error, topic string) {
if isUnwantedError(err) {
log.WithError(err).Debug("Unwanted error")
return
}
if strings.Contains(topic, p2p.RPCGoodByeTopicV1) {
log.WithError(err).Debug("Could not decode goodbye stream message")
log.WithError(err).WithField("topic", topic).Trace("Could not decode goodbye stream message")
return
}
log.WithError(err).Debug("Could not decode stream message")
log.WithError(err).WithField("topic", topic).Debug("Could not decode stream message")
}


@@ -43,13 +43,13 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
rp, err := validateRangeRequest(m, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
tracing.AnnotateError(span, err)
return errors.Wrapf(err, "new bad responses score: %d", score)
return err
}
available := s.validateRangeAvailability(rp)
if !available {
log.Debug("Error in validating range availability")
log.Debug("error in validating range availability")
s.writeErrorResponseToStream(responseCodeResourceUnavailable, p2ptypes.ErrResourceUnavailable.Error(), stream)
tracing.AnnotateError(span, err)
return nil


@@ -94,9 +94,9 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
if uint64(len(blockRoots)) > params.MaxRequestBlock(currentEpoch) {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.writeErrorResponseToStream(responseCodeInvalidRequest, "requested more than the max block limit", stream)
return errors.Errorf("requested more than the max block limit - new bad responses score: %d", score)
return errors.New("requested more than the max block limit")
}
s.rateLimiter.add(stream, int64(len(blockRoots)))


@@ -81,9 +81,9 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
rp, err := validateBlobsByRange(r, s.cfg.chain.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
tracing.AnnotateError(span, err)
return errors.Wrapf(err, "new bad responses score: %d", score)
return err
}
// Ticker to stagger out large requests.
@@ -100,6 +100,9 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
var batch blockBatch
wQuota := params.BeaconConfig().MaxRequestBlobSidecars
if slots.ToEpoch(s.cfg.chain.CurrentSlot()) >= params.BeaconConfig().ElectraForkEpoch {
wQuota = params.BeaconConfig().MaxRequestBlobSidecarsElectra
}
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
batchStart := time.Now()
wQuota, err = s.streamBlobBatch(ctx, batch, wQuota, stream)


@@ -13,9 +13,11 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
@@ -34,10 +36,11 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}
blobIdents := *ref
if err := validateBlobByRootRequest(blobIdents); err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
cs := s.cfg.clock.CurrentSlot()
if err := validateBlobByRootRequest(blobIdents, cs); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return errors.Wrapf(err, "new bad responses score: %d", score)
return err
}
// Sort the identifiers so that requests for the same blob root will be adjacent, minimizing db lookups.
sort.Sort(blobIdents)
@@ -49,7 +52,6 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}
// Compute the oldest slot we'll allow a peer to request, based on the current slot.
cs := s.cfg.clock.CurrentSlot()
minReqSlot, err := BlobRPCMinValidSlot(cs)
if err != nil {
return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
@@ -104,9 +106,15 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
return nil
}
func validateBlobByRootRequest(blobIdents types.BlobSidecarsByRootReq) error {
if uint64(len(blobIdents)) > params.BeaconConfig().MaxRequestBlobSidecars {
return types.ErrMaxBlobReqExceeded
func validateBlobByRootRequest(blobIdents types.BlobSidecarsByRootReq, slot primitives.Slot) error {
if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch {
if uint64(len(blobIdents)) > params.BeaconConfig().MaxRequestBlobSidecarsElectra {
return types.ErrMaxBlobReqExceeded
}
} else {
if uint64(len(blobIdents)) > params.BeaconConfig().MaxRequestBlobSidecars {
return types.ErrMaxBlobReqExceeded
}
}
return nil
}
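The same epoch-gated cap now appears in several places in this diff (here, in blobSidecarsByRangeRPCHandler, and in the SendBlobsByRange/ByRoot senders), so a tiny shared helper would capture it. A sketch, not code from the PR; the concrete values are the spec's MAX_REQUEST_BLOB_SIDECARS (768) and MAX_REQUEST_BLOB_SIDECARS_ELECTRA (1152, per EIP-7691), assuming Prysm mirrors those defaults:

// maxRequestBlobSidecars (sketch): the blob request cap in effect at slot.
func maxRequestBlobSidecars(slot primitives.Slot) uint64 {
	if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch {
		return params.BeaconConfig().MaxRequestBlobSidecarsElectra
	}
	return params.BeaconConfig().MaxRequestBlobSidecars
}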


@@ -139,13 +139,13 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta
// Read the METADATA response from the peer.
code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.Wrapf(err, "read status code for metadata request, new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.Wrap(err, "read status code")
}
if code != 0 {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.Errorf("%s, new bad responses score: %d", errMsg, score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return nil, errors.New(errMsg)
}
// Get the genesis validators root.
@@ -179,8 +179,8 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta
// Decode the metadata from the peer.
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, errors.Wrapf(err, "decode metadata, new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return nil, err
}
return msg, nil


@@ -43,8 +43,7 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
if err != nil {
// Descore peer for giving us a bad sequence number.
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
err = errors.Wrapf(err, "new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidSequenceNum.Error(), stream)
}
@@ -142,8 +141,8 @@ func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
// If the peer responded with an error, increment the bad responses scorer.
if code != 0 {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return errors.Errorf("code: %d, new bad responses score: %d - %s", code, score, errMsg)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
return errors.Errorf("code: %d - %s", code, errMsg)
}
// Decode the sequence number from the peer.
@@ -157,8 +156,7 @@ func (s *Service) sendPingRequest(ctx context.Context, peerID peer.ID) error {
if err != nil {
// Descore peer for giving us a bad sequence number.
if errors.Is(err, p2ptypes.ErrInvalidSequenceNum) {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
err = errors.Wrapf(err, "new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
}
return errors.Wrap(err, "validate sequence number")


@@ -171,6 +171,9 @@ func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle,
maxBlobsPerBlock := uint64(params.BeaconConfig().MaxBlobsPerBlock(req.StartSlot))
max := params.BeaconConfig().MaxRequestBlobSidecars
if slots.ToEpoch(req.StartSlot) >= params.BeaconConfig().ElectraForkEpoch {
max = params.BeaconConfig().MaxRequestBlobSidecarsElectra
}
if max > req.Count*maxBlobsPerBlock {
max = req.Count * maxBlobsPerBlock
}
@@ -201,6 +204,9 @@ func SendBlobSidecarByRoot(
defer closeStream(stream, log)
max := params.BeaconConfig().MaxRequestBlobSidecars
if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch {
max = params.BeaconConfig().MaxRequestBlobSidecarsElectra
}
maxBlobCount := params.BeaconConfig().MaxBlobsPerBlock(slot)
if max > uint64(len(*req)*maxBlobCount) {
max = uint64(len(*req) * maxBlobCount)


@@ -62,12 +62,8 @@ func (s *Service) maintainPeerStatuses() {
}
if prysmTime.Now().After(lastUpdated.Add(interval)) {
if err := s.reValidatePeer(s.ctx, id); err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
log.
WithFields(logrus.Fields{
"peer": id,
"newBadResponsesScore": score,
}).WithError(err).Debug("Could not revalidate peer")
log.WithField("peer", id).WithError(err).Debug("Could not revalidate peer")
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
}
}
}(pid)
@@ -165,18 +161,18 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
if err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return errors.Wrapf(err, "read status code for status request, new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return err
}
if code != 0 {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
return errors.Errorf(errMsg+" new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
return errors.New(errMsg)
}
msg := &pb.Status{}
if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return errors.Wrapf(err, "decode with max length, new bad responses score: %d", score)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return err
}
// If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
@@ -191,7 +187,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetHeadSlot(s.cfg.chain.HeadSlot())
if err := s.sendRPCStatusRequest(ctx, id); err != nil {
return errors.Wrap(err, "revalidate peer")
return err
}
// Do not return an error for ping requests.
if err := s.sendPingRequest(ctx, id); err != nil && !isUnwantedError(err) {
@@ -241,11 +237,7 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
return nil
default:
respCode = responseCodeInvalidRequest
score := s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
log.WithError(err).WithFields(logrus.Fields{
"peer": remotePeer,
"newBadResponsesscore": score,
}).Debug("Could not validate status message")
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
}
originalErr := err


@@ -21,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/async/abool"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -84,6 +85,7 @@ type config struct {
attestationNotifier operation.Notifier
p2p p2p.P2P
beaconDB db.NoHeadAccessDatabase
attestationCache *cache.AttestationCache
attPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingPool slashings.PoolManager


@@ -156,6 +156,19 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
func(currentSlot primitives.Slot) []uint64 { return []uint64{} },
)
}
if params.BeaconConfig().ElectraForkEpoch <= epoch {
s.subscribeWithParameters(
p2p.BlobSubnetTopicFormat,
s.validateBlob,
s.blobSubscriber,
digest,
func(primitives.Slot) []uint64 {
return sliceFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
},
func(currentSlot primitives.Slot) []uint64 { return []uint64{} },
)
}
}
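sliceFromCount presumably just enumerates subnet IDs 0 through count-1 for the subscription loop; a sketch:

// sliceFromCount (sketch): [0, 1, ..., count-1].
func sliceFromCount(count uint64) []uint64 {
	ids := make([]uint64, count)
	for i := uint64(0); i < count; i++ {
		ids[i] = i
	}
	return ids
}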
// subscribe to a given topic with a given validator and subscription handler.


@@ -5,7 +5,7 @@ import (
"errors"
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/features"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
@@ -24,10 +24,13 @@ func (s *Service) beaconAggregateProofSubscriber(_ context.Context, msg proto.Me
return errors.New("nil aggregate")
}
// An unaggregated attestation can make it here. It's valid, the aggregator is just itself, although it means poor performance for the subnet.
if !helpers.IsAggregated(aggregate) {
return s.cfg.attPool.SaveUnaggregatedAttestation(aggregate)
if features.Get().EnableExperimentalAttestationPool {
return s.cfg.attestationCache.Add(aggregate)
} else {
// An unaggregated attestation can make it here. It's valid, the aggregator is just itself, although it means poor performance for the subnet.
if !aggregate.IsAggregated() {
return s.cfg.attPool.SaveUnaggregatedAttestation(aggregate)
}
return s.cfg.attPool.SaveAggregatedAttestation(aggregate)
}
return s.cfg.attPool.SaveAggregatedAttestation(aggregate)
}


@@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
@@ -25,21 +26,21 @@ func (s *Service) committeeIndexBeaconAttestationSubscriber(_ context.Context, m
if data == nil {
return errors.New("nil attestation")
}
committeeIndex, err := a.GetCommitteeIndex()
if err != nil {
return errors.Wrap(err, "committeeIndexBeaconAttestationSubscriber failed to get committee index")
}
committeeIndex := a.GetCommitteeIndex()
s.setSeenCommitteeIndicesSlot(data.Slot, committeeIndex, a.GetAggregationBits())
exists, err := s.cfg.attPool.HasAggregatedAttestation(a)
if err != nil {
return errors.Wrap(err, "could not determine if attestation pool has this attestation")
if features.Get().EnableExperimentalAttestationPool {
return s.cfg.attestationCache.Add(a)
} else {
exists, err := s.cfg.attPool.HasAggregatedAttestation(a)
if err != nil {
return errors.Wrap(err, "could not determine if attestation pool has this attestation")
}
if exists {
return nil
}
return s.cfg.attPool.SaveUnaggregatedAttestation(a)
}
if exists {
return nil
}
return s.cfg.attPool.SaveUnaggregatedAttestation(a)
}
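Note how the experimental path collapses the has-check plus typed save into a single idempotent Add. A toy sketch of an aggregating add, assuming the cache merges aggregation bits for attestations with identical data (the real cache.AttestationCache from the attestation-pool redesign is considerably more involved):

type toyCache struct {
	atts map[[32]byte]*ethpb.AttestationElectra
}

func (c *toyCache) add(att *ethpb.AttestationElectra) error {
	root, err := att.Data.HashTreeRoot()
	if err != nil {
		return err
	}
	existing, ok := c.atts[root]
	if !ok {
		c.atts[root] = att
		return nil
	}
	if len(existing.AggregationBits) != len(att.AggregationBits) {
		return errors.New("bitlist length mismatch")
	}
	for i := range existing.AggregationBits {
		existing.AggregationBits[i] |= att.AggregationBits[i]
	}
	// NB: a real implementation must also aggregate the BLS signatures and
	// reject overlapping bits; both are omitted in this sketch.
	return nil
}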
func (*Service) persistentSubnetIndices() []uint64 {

Some files were not shown because too many files have changed in this diff.