Compare commits

13 Commits

Author SHA1 Message Date
nisdas 1f1c4e2b08 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into useNewCacheKey 2024-12-17 18:13:26 +08:00
Nishant Das 6387040613 Merge branch 'develop' into useNewCacheKey 2024-12-10 14:41:38 +08:00
Nishant Das 1c7077e254 Merge branch 'develop' into useNewCacheKey 2024-11-29 11:58:31 +08:00
Nishant Das 9ab7e7d37c Update CHANGELOG.md 2024-11-29 11:57:31 +08:00
Nishant Das bf80a5430f Merge branch 'develop' into useNewCacheKey 2024-11-27 13:06:03 +08:00
Nishant Das 9aef2c4ee2 Merge branch 'develop' into useNewCacheKey 2024-11-25 17:25:43 +08:00
nisdas d094118082 Add benchmarks 2024-11-08 17:02:04 +08:00
nisdas c69599f343 Addressing Review Comments 2024-11-08 16:41:28 +08:00
nisdas 1e0e5e110a Add Changelog 2024-11-08 16:38:26 +08:00
nisdas 8254cb30b4 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into useNewCacheKey 2024-11-08 16:37:35 +08:00
nisdas 74961eb51f Gosec 2024-10-30 16:50:28 +08:00
nisdas 514f96ec0e Changelog 2024-10-30 13:58:27 +08:00
nisdas cabe7d34b6 Use New Cache Key 2024-10-30 13:46:38 +08:00
190 changed files with 2411 additions and 4741 deletions

View File

@@ -12,23 +12,11 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Added an error field to log `Finished building block`.
- Implemented a new `EmptyExecutionPayloadHeader` function.
- `Finished building block`: Display error only if not nil.
- Added support to update target and max blob count to different values per hard fork config.
- Log before blob filesystem cache warm-up.
- New design for the attestation pool. [PR](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add field param placeholder for Electra blob target and max to pass spec tests.
- Add EIP-7691: Blob throughput increase.
### Changed
- Process light client finality updates only for new finalized epochs instead of doing it for every block.
- Refactor subnets subscriptions.
- Refactor RPC handlers subscriptions.
- Go deps upgrade, from `ioutil` to `io`.
- Move successfully registered validator(s) on builder log to debug.
- Update some test files to use `crypto/rand` instead of `math/rand`.
- Enforce Compound prefix (0x02) for target when processing pending consolidation request.
- Limit consolidating by validator's effective balance.
- Use 16-bit random value for proposer and sync committee selection filter.
- Add more efficient method of computing the cache key for unaggregated attestations.
### Deprecated
@@ -88,8 +76,6 @@ Notable features:
- Added a Prometheus error counter metric for SSE requests.
- Save light client updates and bootstraps in DB.
- Added more comprehensive tests for `BlockToLightClientHeader`. [PR](https://github.com/prysmaticlabs/prysm/pull/14699)
- Added light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736)
- Light client: Add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749)
### Changed
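
The headline change in this branch is the final entry in the first hunk above: a more efficient method of computing the cache key for unaggregated attestations. As a rough sketch of the general idea (hypothetical field layout, not Prysm's actual `attestation.NewId` implementation), a key can be built directly from the fixed-size fields identifying the attestation data, avoiding a full serialization before hashing:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// attData is an illustrative stand-in for the fields that identify an
// attestation's data; the real type lives in proto/prysm/v1alpha1.
type attData struct {
	Slot            uint64
	CommitteeIndex  uint64
	BeaconBlockRoot [32]byte
}

// cacheKey packs the identifying fields into a fixed-size buffer and hashes
// once, instead of serializing the whole attestation object.
func cacheKey(d attData) [32]byte {
	var buf [48]byte
	binary.LittleEndian.PutUint64(buf[0:8], d.Slot)
	binary.LittleEndian.PutUint64(buf[8:16], d.CommitteeIndex)
	copy(buf[16:], d.BeaconBlockRoot[:])
	return sha256.Sum256(buf[:])
}

func main() {
	k := cacheKey(attData{Slot: 42, CommitteeIndex: 3})
	fmt.Printf("%x\n", k[:8])
}
```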

View File

@@ -129,7 +129,7 @@ If your change is user facing, you must include a CHANGELOG.md entry. See the [M
**17. Create a pull request.**
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base develop”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
**18. Respond to comments by Core Contributors.**

View File

@@ -227,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.10"
consensus_spec_version = "v1.5.0-alpha.9"
bls_test_version = "v0.1.1"
@@ -243,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
integrity = "sha256-gHbvlnErUeJGWzW8/8JiVlk28JwmXSMhOzkynEIz+8g=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
integrity = "sha256-hQkQdpm5ng4miGYa5WsOKWa0q8WtZu99Oqbv9QtBeJM=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
integrity = "sha256-33sBsmApnJpcyYfR3olKaPB+WC1q00ZKNzHa2TczIxk=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
integrity = "sha256-GQulBKLc2khpql2K/MxV+NG/d2kAhLXl+gLnKIg7rt4=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
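
The `integrity` attributes above are Subresource Integrity strings: `sha256-` followed by the standard base64 encoding of the archive's SHA-256 digest. When bumping `consensus_spec_version`, the new values can be recomputed with a small standalone tool like this sketch (not part of the repo):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"os"
)

// Prints the Bazel-style SRI integrity string ("sha256-<base64 digest>")
// for the file named by the first command-line argument.
func main() {
	f, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("sha256-%s\n", base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
```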

View File

@@ -15,7 +15,6 @@ go_library(
"//api/client:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -282,7 +282,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
if err != nil {
return err
}
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
log.WithField("num_registrations", len(svr)).Info("successfully registered validator(s) on builder")
return nil
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -1014,7 +1013,7 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
if err != nil {
return nil, err
}
if len(bb.BlobKzgCommitments) > params.BeaconConfig().DeprecatedMaxBlobsPerBlock {
if len(bb.BlobKzgCommitments) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("too many blob commitments: %d", len(bb.BlobKzgCommitments))
}
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))

View File

@@ -432,32 +432,6 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
}, nil
}
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeIndex")
}
ai, err := strconv.ParseUint(a.AttesterIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterIndex")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SingleAttestation{
CommitteeId: primitives.CommitteeIndex(ci),
AttesterIndex: primitives.ValidatorIndex(ai),
Data: data,
Signature: sig,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),

View File

@@ -36,13 +36,6 @@ type AttestationElectra struct {
CommitteeBits string `json:"committee_bits"`
}
type SingleAttestation struct {
CommitteeIndex string `json:"committee_index"`
AttesterIndex string `json:"attester_index"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`

View File

@@ -140,7 +140,6 @@ go_test(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -405,19 +404,13 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.AttestationCache.Add(a); err != nil {
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if a.IsAggregated() {
if err = s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err = s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()

View File

@@ -85,14 +85,6 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
}
}
// WithAttestationCache for attestation lifecycle after chain inclusion.
func WithAttestationCache(c *cache.AttestationCache) Option {
return func(s *Service) error {
s.cfg.AttestationCache = c
return nil
}
}
// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
return func(s *Service) error {

View File

@@ -15,6 +15,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -378,11 +379,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
} else if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.AttestationCache.Add(a); err != nil {
return err
}
} else if err = s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}
}
@@ -422,11 +419,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
atts := headBlock.Block().Body().Attestations()
for _, att := range atts {
if features.Get().EnableExperimentalAttestationPool {
if err := s.cfg.AttestationCache.DeleteCovered(att); err != nil {
return errors.Wrap(err, "could not delete attestation")
}
} else if att.IsAggregated() {
if helpers.IsAggregated(att) {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
@@ -503,15 +496,14 @@ func (s *Service) runLateBlockTasks() {
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte, slot primitives.Slot) (map[uint64]struct{}, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte) (map[uint64]struct{}, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > maxBlobsPerBlock {
if len(expected) > fieldparams.MaxBlobsPerBlock {
return nil, errMaxBlobsExceeded
}
indices, err := bs.Indices(root, slot)
indices, err := bs.Indices(root)
if err != nil {
return nil, err
}
@@ -560,7 +552,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
return nil
}
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
if err != nil {
return err
}
@@ -571,7 +563,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root, block.Slot())
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
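
In essence, `missingIndices` treats the KZG commitment list as defining the expected index range `[0, len(expected))` and subtracts the indices already on disk. Below is a self-contained model of that logic, using a plain set in place of `filesystem.BlobStorage` and a fixed constant in place of the per-fork maximum:

```go
package main

import (
	"errors"
	"fmt"
)

// maxBlobsPerBlock stands in for fieldparams.MaxBlobsPerBlock (6 at Deneb).
const maxBlobsPerBlock = 6

var errMaxBlobsExceeded = errors.New("expected commitments exceed max blobs per block")

// missingIndices reports every index in [0, expected) that is absent from
// the present set, mirroring the DA-check helper in the diff above.
func missingIndices(present map[uint64]struct{}, expected int) (map[uint64]struct{}, error) {
	if expected == 0 {
		return nil, nil
	}
	if expected > maxBlobsPerBlock {
		return nil, errMaxBlobsExceeded
	}
	missing := make(map[uint64]struct{}, expected)
	for i := uint64(0); i < uint64(expected); i++ {
		if _, ok := present[i]; !ok {
			missing[i] = struct{}{}
		}
	}
	return missing, nil
}

func main() {
	present := map[uint64]struct{}{1: {}, 2: {}}
	missing, err := missingIndices(present, 4)
	fmt.Println(missing, err) // map[0:{} 3:{}] <nil>
}
```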

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"strings"
"time"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
@@ -130,7 +129,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
attestedRoot := cfg.roblock.Block().ParentRoot()
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Errorf("Saving light client update failed: Could not get attested block for root %#x", attestedRoot)
log.WithError(err).Error("Saving light client update failed: Could not get attested block")
return
}
if attestedBlock == nil || attestedBlock.IsNil() {
@@ -139,7 +138,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
}
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
if err != nil {
log.WithError(err).Errorf("Saving light client update failed: Could not get attested state for root %#x", attestedRoot)
log.WithError(err).Error("Saving light client update failed: Could not get attested state")
return
}
if attestedState == nil || attestedState.IsNil() {
@@ -150,11 +149,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
finalizedRoot := attestedState.FinalizedCheckpoint().Root
finalizedBlock, err := s.getBlock(cfg.ctx, [32]byte(finalizedRoot))
if err != nil {
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping saving light client update: Finalized block is nil for root %#x", finalizedRoot)
} else {
log.WithError(err).Errorf("Saving light client update failed: Could not get finalized block for root %#x", finalizedRoot)
}
log.WithError(err).Error("Saving light client update failed: Could not get finalized block")
return
}
@@ -229,30 +224,28 @@ func (s *Service) processLightClientFinalityUpdate(
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
return errors.Wrap(err, "could not get attested block")
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
return errors.Wrap(err, "could not get attested state")
}
finalizedCheckpoint := attestedState.FinalizedCheckpoint()
var finalizedBlock interfaces.ReadOnlySignedBeaconBlock
finalizedCheckPoint := attestedState.FinalizedCheckpoint()
if finalizedCheckPoint != nil {
finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root)
finalizedBlock, err = s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
finalizedBlock = nil
}
}
// Check if the finalized checkpoint has changed
if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
if finalizedCheckPoint == nil || bytes.Equal(finalizedCheckPoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
return nil
}
finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
return nil
}
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState.Slot(),
@@ -279,11 +272,11 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
attestedRoot := signed.Block().ParentRoot()
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
return errors.Wrap(err, "could not get attested block")
}
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
if err != nil {
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
return errors.Wrap(err, "could not get attested state")
}
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
@@ -296,10 +289,6 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
)
if err != nil {
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
log.WithError(err).Debug("Skipping processing light client optimistic update")
return nil
}
return errors.Wrap(err, "could not create light client optimistic update")
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -2206,23 +2205,23 @@ func TestMissingIndices(t *testing.T) {
},
{
name: "expected exceeds max",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},
@@ -2256,7 +2255,7 @@ func TestMissingIndices(t *testing.T) {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
missing, err := missingIndices(bs, c.root, c.expected, 0)
missing, err := missingIndices(bs, c.root, c.expected)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
@@ -2506,500 +2505,173 @@ func fakeResult(missing []uint64) map[uint64]struct{} {
}
func TestSaveLightClientUpdate(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
s, tr := minimalTestService(t)
ctx := tr.ctx
t.Run("Altair", func(t *testing.T) {
t.Run("No old update", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestAltair()
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
l := util.NewTestLightClient(t).SetupTestAltair()
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Altair)
})
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
t.Run("New update is better", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestAltair()
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Altair)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Altair)
})
t.Run("Old update is better", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestAltair()
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
require.NoError(t, err)
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), version.Altair)
})
reset()
})
t.Run("Capella", func(t *testing.T) {
t.Run("No old update", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestCapella(false)
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
l := util.NewTestLightClient(t).SetupTestCapella(false)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Capella)
})
s.saveLightClientUpdate(cfg)
t.Run("New update is better", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestCapella(false)
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Capella)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Capella)
})
t.Run("Old update is better", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestCapella(false)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
require.NoError(t, err)
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), version.Capella)
})
reset()
})
t.Run("Deneb", func(t *testing.T) {
t.Run("No old update", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestDeneb(false)
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
l := util.NewTestLightClient(t).SetupTestDeneb(false)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Deneb)
})
s.saveLightClientUpdate(cfg)
t.Run("New update is better", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestDeneb(false)
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Deneb)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Deneb)
})
t.Run("Old update is better", func(t *testing.T) {
l := util.NewTestLightClient(t).SetupTestDeneb(false)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
require.NoError(t, err)
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
s.saveLightClientUpdate(cfg)
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), version.Deneb)
})
reset()
})
reset()
}
func TestSaveLightClientBootstrap(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
s, tr := minimalTestService(t)
ctx := tr.ctx
t.Run("Altair", func(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
l := util.NewTestLightClient(t).SetupTestAltair()
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
@@ -3032,9 +2704,15 @@ func TestSaveLightClientBootstrap(t *testing.T) {
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Altair)
reset()
})
t.Run("Capella", func(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
l := util.NewTestLightClient(t).SetupTestCapella(false)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
@@ -3067,9 +2745,15 @@ func TestSaveLightClientBootstrap(t *testing.T) {
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Capella)
reset()
})
t.Run("Deneb", func(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
l := util.NewTestLightClient(t).SetupTestDeneb(false)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
@@ -3102,7 +2786,7 @@ func TestSaveLightClientBootstrap(t *testing.T) {
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), version.Deneb)
})
reset()
reset()
})
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -167,13 +166,7 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
var atts []ethpb.Att
if features.Get().EnableExperimentalAttestationPool {
atts = s.cfg.AttestationCache.ForkchoiceAttestations()
} else {
atts = s.cfg.AttPool.ForkchoiceAttestations()
}
atts := s.cfg.AttPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
@@ -189,11 +182,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
continue
}
if features.Get().EnableExperimentalAttestationPool {
if err := s.cfg.AttestationCache.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
} else if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}
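
The comment in the loop above encodes a fork choice rule worth calling out: an attestation created in slot `S` is not considered until the node's current slot is strictly greater than `S`. A minimal sketch of that guard (hypothetical helper, assuming plain integer slot values):

```go
package main

import "fmt"

// readyForForkchoice reports whether an attestation from attSlot may be
// processed at currentSlot: per the spec, only once its slot is in the past.
func readyForForkchoice(attSlot, currentSlot uint64) bool {
	return attSlot < currentSlot
}

func main() {
	fmt.Println(readyForForkchoice(10, 10)) // false: same slot, wait
	fmt.Println(readyForForkchoice(10, 11)) // true: subsequent slot
}
```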

View File

@@ -4,13 +4,12 @@ import (
"context"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// sendNewBlobEvent sends a message to the BlobNotifier channel that the blob
// for the block root `root` is ready in the database.
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64, slot primitives.Slot) {
s.blobNotifiers.notifyIndex(root, index, slot)
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
s.blobNotifiers.notifyIndex(root, index)
}
// ReceiveBlob saves the blob to database and sends the new event
@@ -19,6 +18,6 @@ func (s *Service) ReceiveBlob(ctx context.Context, b blocks.VerifiedROBlob) erro
return err
}
s.sendNewBlobEvent(b.BlockRoot(), b.Index, b.Slot())
s.sendNewBlobEvent(b.BlockRoot(), b.Index)
return nil
}

View File

@@ -33,10 +33,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -75,7 +75,6 @@ type config struct {
DepositCache cache.DepositCache
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
@@ -105,22 +104,18 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][]bool
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
}
// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
if idx >= fieldparams.MaxBlobsPerBlock {
return
}
bn.Lock()
seen := bn.seenIndex[root]
if seen == nil {
seen = make([]bool, maxBlobsPerBlock)
}
if seen[idx] {
bn.Unlock()
return
@@ -131,7 +126,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
bn.notifiers[root] = c
}
@@ -140,13 +135,12 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
c <- idx
}
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
bn.notifiers[root] = c
}
return c
@@ -172,7 +166,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
}
srv := &Service{
ctx: ctx,
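
The core of this file's change is replacing the dynamically sized `seen` slice with a fixed-size array keyed per root: because Go arrays are value types, `bn.seenIndex[root]` yields a ready-to-use zero value for unknown roots, which is why the `seen == nil` allocation branch disappears. A compact, self-contained model of the pattern, with a fixed constant standing in for `fieldparams.MaxBlobsPerBlock`:

```go
package main

import (
	"fmt"
	"sync"
)

const maxBlobsPerBlock = 6 // stands in for fieldparams.MaxBlobsPerBlock

// notifierMap models blobNotifierMap above: a fixed-size seen array per root
// deduplicates notifications, and a channel buffered to the max blob count
// lets notifyIndex send without ever blocking.
type notifierMap struct {
	sync.Mutex
	notifiers map[[32]byte]chan uint64
	seenIndex map[[32]byte][maxBlobsPerBlock]bool
}

func (m *notifierMap) notifyIndex(root [32]byte, idx uint64) {
	if idx >= maxBlobsPerBlock {
		return
	}
	m.Lock()
	seen := m.seenIndex[root] // zero-valued array for unseen roots
	if seen[idx] {
		m.Unlock()
		return
	}
	seen[idx] = true
	m.seenIndex[root] = seen // arrays are values: write the copy back
	c, ok := m.notifiers[root]
	if !ok {
		c = make(chan uint64, maxBlobsPerBlock)
		m.notifiers[root] = c
	}
	m.Unlock()
	c <- idx
}

func main() {
	m := &notifierMap{
		notifiers: make(map[[32]byte]chan uint64),
		seenIndex: make(map[[32]byte][maxBlobsPerBlock]bool),
	}
	var root [32]byte
	m.notifyIndex(root, 1)
	m.notifyIndex(root, 1)              // duplicate: dropped
	fmt.Println(len(m.notifiers[root])) // 1
}
```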

View File

@@ -587,7 +587,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
notifiers: make(map[[32]byte]chan uint64),
}
@@ -596,7 +596,7 @@ func TestNotifyIndex(t *testing.T) {
copy(root[:], "exampleRoot")
// Test notifying a new index
bn.notifyIndex(root, 1, 1)
bn.notifyIndex(root, 1)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}
@@ -607,13 +607,13 @@ func TestNotifyIndex(t *testing.T) {
}
// Test notifying an already seen index
bn.notifyIndex(root, 1, 1)
bn.notifyIndex(root, 1)
if len(bn.notifiers[root]) > 1 {
t.Errorf("Notifier channel should not receive multiple messages for the same index")
}
// Test notifying a new index again
bn.notifyIndex(root, 2, 1)
bn.notifyIndex(root, 2)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"active_balance.go",
"active_balance_disabled.go", # keep
"attestation.go",
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
@@ -37,21 +36,18 @@ go_library(
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations/attmap:go_default_library",
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
@@ -70,7 +66,6 @@ go_test(
srcs = [
"active_balance_test.go",
"attestation_data_test.go",
"attestation_test.go",
"cache_test.go",
"checkpoint_state_test.go",
"committee_fuzz_test.go",
@@ -93,17 +88,14 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

View File

@@ -1,275 +0,0 @@
package cache
import (
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
log "github.com/sirupsen/logrus"
)
type attGroup struct {
slot primitives.Slot
atts []ethpb.Att
}
// AttestationCache holds a map of attGroup items that group together all attestations for a single slot.
// When we add an attestation to the cache by calling Add, we either create a new group with this attestation
// (if this is the first attestation for some slot) or one of two things happens:
//
// - If the attestation is unaggregated, we add its attestation bit to attestation bits of the first
// attestation in the group.
// - If the attestation is aggregated, we append it to the group. There should be no redundancy
// in the list because we ignore redundant aggregates in gossip.
//
// The first bullet point above means that we keep one aggregate attestation to which we keep appending bits
// as new single-bit attestations arrive. This means that at any point during seconds 0-4 of a slot
// we will have only one attestation for this slot in the cache.
//
// NOTE: This design in principle can result in worse aggregates since we lose the ability to aggregate some
// single bit attestations in case of overlaps with incoming aggregates.
//
// The cache also keeps forkchoice attestations in a separate struct. These attestations are used for
// forkchoice-related operations.
type AttestationCache struct {
atts map[attestation.Id]*attGroup
sync.RWMutex
forkchoiceAtts *attmap.Attestations
}
// NewAttestationCache creates a new cache instance.
func NewAttestationCache() *AttestationCache {
return &AttestationCache{
atts: make(map[attestation.Id]*attGroup),
forkchoiceAtts: attmap.New(),
}
}
// Add does one of two things:
//
// - For unaggregated attestations, it adds the attestation bit to attestation bits of the running aggregate,
// which is the first aggregate for the slot.
// - For aggregated attestations, it appends the attestation to the existing list of attestations for the slot.
func (c *AttestationCache) Add(att ethpb.Att) error {
if att.IsNil() {
log.Debug("Attempted to add a nil attestation to the attestation cache")
return nil
}
if len(att.GetAggregationBits().BitIndices()) == 0 {
log.Debug("Attempted to add an attestation with 0 bits set to the attestation cache")
return nil
}
c.Lock()
defer c.Unlock()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrapf(err, "could not create attestation ID")
}
group := c.atts[id]
if group == nil {
group = &attGroup{
slot: att.GetData().Slot,
atts: []ethpb.Att{att},
}
c.atts[id] = group
return nil
}
if att.IsAggregated() {
group.atts = append(group.atts, att.Clone())
return nil
}
// This should never happen because we return early for a new group.
if len(group.atts) == 0 {
log.Error("Attestation group contains no attestations, skipping insertion")
return nil
}
a := group.atts[0]
// Indexing is safe because we have guarded against 0 bits set.
bit := att.GetAggregationBits().BitIndices()[0]
if a.GetAggregationBits().BitAt(uint64(bit)) {
return nil
}
sig, err := aggregateSig(a, att)
if err != nil {
return errors.Wrapf(err, "could not aggregate signatures")
}
a.GetAggregationBits().SetBitAt(uint64(bit), true)
a.SetSignature(sig)
return nil
}
// GetAll returns all attestations in the cache, excluding forkchoice attestations.
func (c *AttestationCache) GetAll() []ethpb.Att {
c.RLock()
defer c.RUnlock()
var result []ethpb.Att
for _, group := range c.atts {
result = append(result, group.atts...)
}
return result
}
// Count returns the number of all attestations in the cache, excluding forkchoice attestations.
func (c *AttestationCache) Count() int {
c.RLock()
defer c.RUnlock()
count := 0
for _, group := range c.atts {
count += len(group.atts)
}
return count
}
// DeleteCovered removes all attestations whose attestation bits are a proper subset of the passed-in attestation.
func (c *AttestationCache) DeleteCovered(att ethpb.Att) error {
if att.IsNil() {
return nil
}
c.Lock()
defer c.Unlock()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrapf(err, "could not create attestation ID")
}
group := c.atts[id]
if group == nil {
return nil
}
idx := 0
for _, a := range group.atts {
if covered, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
return err
} else if !covered {
group.atts[idx] = a
idx++
}
}
group.atts = group.atts[:idx]
if len(group.atts) == 0 {
delete(c.atts, id)
}
return nil
}
// PruneBefore removes all attestations whose slot is earlier than the passed-in slot.
func (c *AttestationCache) PruneBefore(slot primitives.Slot) uint64 {
c.Lock()
defer c.Unlock()
var pruneCount int
for id, group := range c.atts {
if group.slot < slot {
pruneCount += len(group.atts)
delete(c.atts, id)
}
}
return uint64(pruneCount)
}
// AggregateIsRedundant checks whether all aggregation bits of the passed-in aggregate
// are already covered by a single existing aggregate in the cache.
func (c *AttestationCache) AggregateIsRedundant(att ethpb.Att) (bool, error) {
if att.IsNil() {
return true, nil
}
c.RLock()
defer c.RUnlock()
id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return true, errors.Wrapf(err, "could not create attestation ID")
}
group := c.atts[id]
if group == nil {
return false, nil
}
for _, a := range group.atts {
if redundant, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
return true, err
} else if redundant {
return true, nil
}
}
return false, nil
}
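Note that the containment check runs in the opposite direction from DeleteCovered: an incoming aggregate is redundant only if one cached aggregate already contains all of its bits. A hypothetical sketch:
cached := bitfield.NewBitlist(8)
cached.SetBitAt(0, true)
cached.SetBitAt(1, true) // cached aggregate covers {0, 1}
incoming := bitfield.NewBitlist(8)
incoming.SetBitAt(0, true)
redundant, _ := cached.Contains(incoming) // true: {0} is covered by {0, 1}
incoming.SetBitAt(2, true)
redundant, _ = cached.Contains(incoming)  // false: bit 2 is not covered by any single cached aggregate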
// SaveForkchoiceAttestations saves forkchoice attestations.
func (c *AttestationCache) SaveForkchoiceAttestations(att []ethpb.Att) error {
return c.forkchoiceAtts.SaveMany(att)
}
// ForkchoiceAttestations returns all forkchoice attestations.
func (c *AttestationCache) ForkchoiceAttestations() []ethpb.Att {
return c.forkchoiceAtts.GetAll()
}
// DeleteForkchoiceAttestation deletes a forkchoice attestation.
func (c *AttestationCache) DeleteForkchoiceAttestation(att ethpb.Att) error {
return c.forkchoiceAtts.Delete(att)
}
// GetBySlotAndCommitteeIndex returns all attestations in the cache that match the provided slot
// and committee index. Forkchoice attestations are not returned.
//
// NOTE: This function cannot be declared as a method on the AttestationCache because Go methods cannot have type parameters.
func GetBySlotAndCommitteeIndex[T ethpb.Att](c *AttestationCache, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []T {
c.RLock()
defer c.RUnlock()
var result []T
for _, group := range c.atts {
if len(group.atts) > 0 {
// We can safely compare the first attestation because all attestations in a group
// must have the same slot and committee index, since they are under the same key.
a, ok := group.atts[0].(T)
if ok && a.GetData().Slot == slot && a.CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
for _, a := range group.atts {
a, ok := a.(T)
if ok {
result = append(result, a)
}
}
}
}
}
return result
}
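A hypothetical call site, instantiating the generic for phase0 attestations; `c` is assumed to be an *AttestationCache, and the slot and committee values are illustrative:
// Returns only groups whose attestations match slot 10 and committee 3.
atts := GetBySlotAndCommitteeIndex[*ethpb.Attestation](c, primitives.Slot(10), primitives.CommitteeIndex(3))
for _, a := range atts {
	_ = a.GetAggregationBits() // every returned attestation belongs to the same committee
}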
func aggregateSig(agg ethpb.Att, att ethpb.Att) ([]byte, error) {
aggSig, err := bls.SignatureFromBytesNoValidation(agg.GetSignature())
if err != nil {
return nil, err
}
attSig, err := bls.SignatureFromBytesNoValidation(att.GetSignature())
if err != nil {
return nil, err
}
return bls.AggregateSignatures([]bls.Signature{aggSig, attSig}).Marshal(), nil
}
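aggregateSig leans on the BLS property that signatures over the same message aggregate into one signature that verifies against the signers' combined public keys. A rough sketch using the test helpers appearing below; the exact FastAggregateVerify signature is an assumption:
k1, _ := blst.RandKey()
k2, _ := blst.RandKey()
msg := [32]byte{'m'}
agg := bls.AggregateSignatures([]bls.Signature{k1.Sign(msg[:]), k2.Sign(msg[:])})
// Expected to hold: the aggregate verifies against both public keys at once.
ok := agg.FastAggregateVerify([]bls.PublicKey{k1.PublicKey(), k2.PublicKey()}, msg)
_ = ok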

View File

@@ -15,24 +15,24 @@ type AttestationConsensusData struct {
Source forkchoicetypes.Checkpoint
}
// AttestationDataCache stores cached results of AttestationData requests.
type AttestationDataCache struct {
// AttestationCache stores cached results of AttestationData requests.
type AttestationCache struct {
a *AttestationConsensusData
sync.RWMutex
}
// NewAttestationDataCache creates a new instance of AttestationDataCache.
func NewAttestationDataCache() *AttestationDataCache {
return &AttestationDataCache{}
// NewAttestationCache creates a new instance of AttestationCache.
func NewAttestationCache() *AttestationCache {
return &AttestationCache{}
}
// Get retrieves cached attestation data, recording a cache hit or miss. This method is lock free.
func (c *AttestationDataCache) Get() *AttestationConsensusData {
func (c *AttestationCache) Get() *AttestationConsensusData {
return c.a
}
// Put adds a response to the cache. This method is lock free.
func (c *AttestationDataCache) Put(a *AttestationConsensusData) error {
func (c *AttestationCache) Put(a *AttestationConsensusData) error {
if a == nil {
return errors.New("attestation cannot be nil")
}

View File

@@ -9,7 +9,7 @@ import (
)
func TestAttestationCache_RoundTrip(t *testing.T) {
c := cache.NewAttestationDataCache()
c := cache.NewAttestationCache()
a := c.Get()
require.Nil(t, a)

View File

@@ -1,353 +0,0 @@
package cache
import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestAdd(t *testing.T) {
k, err := blst.RandKey()
require.NoError(t, err)
sig := k.Sign([]byte{'X'})
t.Run("new ID", func(t *testing.T) {
t.Run("first ID ever", func(t *testing.T) {
c := NewAttestationCache()
ab := bitfield.NewBitlist(8)
ab.SetBitAt(0, true)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: ab,
Signature: sig.Marshal(),
}
id, err := attestation.NewId(att, attestation.Data)
require.NoError(t, err)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, group.atts[0], att)
})
t.Run("other ID exists", func(t *testing.T) {
c := NewAttestationCache()
ab := bitfield.NewBitlist(8)
ab.SetBitAt(0, true)
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: ab,
Signature: sig.Marshal(),
}
existingId, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[existingId] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: ab,
Signature: sig.Marshal(),
}
id, err := attestation.NewId(att, attestation.Data)
require.NoError(t, err)
require.NoError(t, c.Add(att))
require.Equal(t, 2, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, group.atts[0], att)
})
})
t.Run("aggregated", func(t *testing.T) {
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 2, len(group.atts))
assert.DeepEqual(t, group.atts[0], existingAtt)
assert.DeepEqual(t, group.atts[1], att)
})
t.Run("unaggregated - existing bit", func(t *testing.T) {
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
existingAtt.AggregationBits.SetBitAt(0, true)
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, []int{0}, group.atts[0].GetAggregationBits().BitIndices())
})
t.Run("unaggregated - new bit", func(t *testing.T) {
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
existingAtt.AggregationBits.SetBitAt(0, true)
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(1, true)
require.NoError(t, c.Add(att))
require.Equal(t, 1, len(c.atts))
group, ok := c.atts[id]
require.Equal(t, true, ok)
assert.Equal(t, primitives.Slot(123), group.slot)
require.Equal(t, 1, len(group.atts))
assert.DeepEqual(t, []int{0, 1}, group.atts[0].GetAggregationBits().BitIndices())
})
}
func TestGetAll(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}, &ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}}}
assert.Equal(t, 3, len(c.GetAll()))
}
func TestCount(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}, &ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{atts: []ethpb.Att{&ethpb.Attestation{}}}
assert.Equal(t, 3, c.Count())
}
func TestDeleteCovered(t *testing.T) {
k, err := blst.RandKey()
require.NoError(t, err)
sig := k.Sign([]byte{'X'})
att1 := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att1.AggregationBits.SetBitAt(0, true)
att2 := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att2.AggregationBits.SetBitAt(1, true)
att2.AggregationBits.SetBitAt(2, true)
att3 := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att3.AggregationBits.SetBitAt(1, true)
att3.AggregationBits.SetBitAt(3, true)
att3.AggregationBits.SetBitAt(4, true)
c := NewAttestationCache()
id, err := attestation.NewId(att1, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: att1.Data.Slot, atts: []ethpb.Att{att1, att2, att3}}
t.Run("no matching group", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 456, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(2, true)
att.AggregationBits.SetBitAt(3, true)
att.AggregationBits.SetBitAt(4, true)
require.NoError(t, c.DeleteCovered(att))
assert.Equal(t, 3, len(c.atts[id].atts))
})
t.Run("covered atts deleted", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(3, true)
att.AggregationBits.SetBitAt(4, true)
require.NoError(t, c.DeleteCovered(att))
atts := c.atts[id].atts
require.Equal(t, 1, len(atts))
assert.DeepEqual(t, att2, atts[0])
})
t.Run("last att in group deleted", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(2, true)
att.AggregationBits.SetBitAt(3, true)
att.AggregationBits.SetBitAt(4, true)
require.NoError(t, c.DeleteCovered(att))
assert.Equal(t, 0, len(c.atts))
})
}
func TestPruneBefore(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{slot: 1, atts: []ethpb.Att{&ethpb.Attestation{}, &ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{slot: 3, atts: []ethpb.Att{&ethpb.Attestation{}}}
c.atts[bytesutil.ToBytes32([]byte("id3"))] = &attGroup{slot: 2, atts: []ethpb.Att{&ethpb.Attestation{}}}
count := c.PruneBefore(3)
require.Equal(t, 1, len(c.atts))
_, ok := c.atts[bytesutil.ToBytes32([]byte("id2"))]
assert.Equal(t, true, ok)
assert.Equal(t, uint64(3), count)
}
func TestAggregateIsRedundant(t *testing.T) {
k, err := blst.RandKey()
require.NoError(t, err)
sig := k.Sign([]byte{'X'})
c := NewAttestationCache()
existingAtt := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
existingAtt.AggregationBits.SetBitAt(0, true)
existingAtt.AggregationBits.SetBitAt(1, true)
id, err := attestation.NewId(existingAtt, attestation.Data)
require.NoError(t, err)
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
t.Run("no matching group", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 456, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, false, redundant)
})
t.Run("redundant", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, true, redundant)
})
t.Run("not redundant", func(t *testing.T) {
t.Run("strictly better", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(1, true)
att.AggregationBits.SetBitAt(2, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, false, redundant)
})
t.Run("overlapping and new bits", func(t *testing.T) {
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: &ethpb.Checkpoint{Root: make([]byte, 32)}, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}},
AggregationBits: bitfield.NewBitlist(8),
Signature: sig.Marshal(),
}
att.AggregationBits.SetBitAt(0, true)
att.AggregationBits.SetBitAt(2, true)
redundant, err := c.AggregateIsRedundant(att)
require.NoError(t, err)
assert.Equal(t, false, redundant)
})
})
}
func TestGetBySlotAndCommitteeIndex(t *testing.T) {
c := NewAttestationCache()
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{slot: 1, atts: []ethpb.Att{&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}}, &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1}}}}
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{slot: 2, atts: []ethpb.Att{&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 2}}}}
c.atts[bytesutil.ToBytes32([]byte("id3"))] = &attGroup{slot: 1, atts: []ethpb.Att{&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2, CommitteeIndex: 2}}}}
// committeeIndex has to be small enough to fit in the bitvector
atts := GetBySlotAndCommitteeIndex[*ethpb.Attestation](c, 1, 1)
require.Equal(t, 2, len(atts))
assert.Equal(t, primitives.Slot(1), atts[0].Data.Slot)
assert.Equal(t, primitives.Slot(1), atts[1].Data.Slot)
assert.Equal(t, primitives.CommitteeIndex(1), atts[0].Data.CommitteeIndex)
assert.Equal(t, primitives.CommitteeIndex(1), atts[1].Data.CommitteeIndex)
}

View File

@@ -2,7 +2,6 @@ package altair
import (
"context"
"encoding/binary"
goErrors "errors"
"fmt"
"time"
@@ -23,6 +22,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
const maxRandomByte = uint64(1<<8 - 1)
var (
ErrTooLate = errors.New("sync message is too late")
)
@@ -90,22 +91,19 @@ func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCom
// """
// epoch = Epoch(get_current_epoch(state) + 1)
//
// MAX_RANDOM_VALUE = 2**16 - 1 # [Modified in Electra]
// MAX_RANDOM_BYTE = 2**8 - 1
// active_validator_indices = get_active_validator_indices(state, epoch)
// active_validator_count = uint64(len(active_validator_indices))
// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
// i = uint64(0)
// i = 0
// sync_committee_indices: List[ValidatorIndex] = []
// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
// candidate_index = active_validator_indices[shuffled_index]
// # [Modified in Electra]
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// # [Modified in Electra:EIP7251]
// if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
// sync_committee_indices.append(candidate_index)
// i += 1
// return sync_committee_indices
@@ -125,11 +123,12 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
cIndices := make([]primitives.ValidatorIndex, 0, syncCommitteeSize)
hashFunc := hash.CustomSHA256Hasher()
// Preallocate buffers to avoid repeated allocations
seedBuffer := make([]byte, len(seed)+8)
copy(seedBuffer, seed[:])
maxEB := cfg.MaxEffectiveBalanceElectra
if s.Version() < version.Electra {
maxEB = cfg.MaxEffectiveBalance
}
for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < syncCommitteeSize; i++ {
for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < params.BeaconConfig().SyncCommitteeSize; i++ {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -138,30 +137,18 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
if err != nil {
return nil, err
}
b := append(seed[:], bytesutil.Bytes8(uint64(i.Div(32)))...)
randomByte := hashFunc(b)[i%32]
cIndex := indices[sIndex]
v, err := s.ValidatorAtIndexReadOnly(cIndex)
if err != nil {
return nil, err
}
effectiveBal := v.EffectiveBalance()
if s.Version() >= version.Electra {
// Use the preallocated seed buffer
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], uint64(i/16))
randomByte := hashFunc(seedBuffer)
offset := (i % 16) * 2
randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8
if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
cIndices = append(cIndices, cIndex)
}
} else {
// Use the preallocated seed buffer
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], uint64(i/32))
randomByte := hashFunc(seedBuffer)[i%32]
if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
cIndices = append(cIndices, cIndex)
}
if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
cIndices = append(cIndices, cIndex)
}
}
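For context on the diff above: Electra replaces the single random byte with a 16-bit little-endian value, which makes the balance-proportional acceptance filter far less coarse. A self-contained sketch of the arithmetic, with constants taken from the spec (the printed values are the quantization granularity of the acceptance probability):
package main

import "fmt"

func main() {
	const maxEBGwei = uint64(2_048_000_000_000) // MAX_EFFECTIVE_BALANCE_ELECTRA (2048 ETH)
	const randomByteStates = uint64(1 << 8)     // pre-Electra: one random byte
	const randomValueStates = uint64(1 << 16)   // Electra: 16-bit random value
	// Granularity of the acceptance probability, in Gwei of effective balance.
	fmt.Println(maxEBGwei / randomByteStates)  // 8_000_000_000 (8 ETH steps)
	fmt.Println(maxEBGwei / randomValueStates) // 31_250_000 (~0.031 ETH steps)
}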

View File

@@ -8,11 +8,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -211,7 +210,7 @@ func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBod
if err != nil {
return err
}
if err := verifyBlobCommitmentCount(st.Slot(), body); err != nil {
if err := verifyBlobCommitmentCount(body); err != nil {
return err
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
@@ -226,7 +225,7 @@ func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBod
return nil
}
func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBeaconBlockBody) error {
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
if body.Version() < version.Deneb {
return nil
}
@@ -234,8 +233,7 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
if err != nil {
return err
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if len(kzgs) > maxBlobsPerBlock {
if len(kzgs) > field_params.MaxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
}
return nil

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -924,10 +923,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
rb, err := consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
rb, err = consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
}

View File

@@ -20,7 +20,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
vals := make([]*eth.Validator, num)
for i := range vals {
wd := make([]byte, 32)
wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
wd[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wd[31] = byte(i)
vals[i] = &eth.Validator{

View File

@@ -37,7 +37,8 @@ import (
// break
//
// # Calculate the consolidated balance
// source_effective_balance = min(state.balances[pending_consolidation.source_index], source_validator.effective_balance)
// max_effective_balance = get_max_effective_balance(source_validator)
// source_effective_balance = min(state.balances[pending_consolidation.source_index], max_effective_balance)
//
// # Move active balance to target. Excess balance is withdrawable.
// decrease_balance(state, pending_consolidation.source_index, source_effective_balance)
@@ -77,7 +78,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
if err != nil {
return err
}
b := min(validatorBalance, sourceValidator.EffectiveBalance())
b := min(validatorBalance, helpers.ValidatorMaxEffectiveBalance(sourceValidator))
if err := helpers.DecreaseBalance(st, pc.SourceIndex, b); err != nil {
return err
@@ -140,8 +141,8 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// if not (has_correct_credential and is_correct_source_address):
// return
//
// # Verify that target has compounding withdrawal credentials
// if not has_compounding_withdrawal_credential(target_validator):
// # Verify that target has execution withdrawal credentials
// if not has_execution_withdrawal_credential(target_validator):
// return
//
// # Verify the source and the target are active
@@ -174,6 +175,10 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// source_index=source_index,
// target_index=target_index
// ))
//
// # Churn any target excess active balance of target and raise its max
// if has_eth1_withdrawal_credential(target_validator):
// switch_to_compounding_validator(state, target_index)
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
if len(reqs) == 0 || st == nil {
return nil
@@ -248,7 +253,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
}
// Target validator must have their withdrawal credentials set appropriately.
if !helpers.HasCompoundingWithdrawalCredential(tgtV) {
if !helpers.HasExecutionWithdrawalCredentials(tgtV) {
continue
}
@@ -293,6 +298,13 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
}
if helpers.HasETH1WithdrawalCredential(tgtV) {
if err := SwitchToCompoundingValidator(st, tgtIdx); err != nil {
log.WithError(err).Error("failed to switch to compounding validator")
continue
}
}
}
return nil

View File

@@ -46,7 +46,6 @@ func TestProcessPendingConsolidations(t *testing.T) {
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
@@ -219,7 +218,7 @@ func TestProcessConsolidationRequests(t *testing.T) {
}
// Validator scenario setup. See comments in reqs section.
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[13].ExitEpoch = 10
@@ -247,7 +246,7 @@ func TestProcessConsolidationRequests(t *testing.T) {
SourcePubkey: []byte("val_5"),
TargetPubkey: []byte("val_6"),
},
// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
// Target does not have their withdrawal credentials set appropriately.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
SourcePubkey: []byte("val_7"),

View File

@@ -32,7 +32,7 @@ func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation.GetData().Target == nil {
return errors.New("attestation's target can't be nil")
}
if !attestation.IsSingle() && attestation.GetAggregationBits() == nil {
if attestation.GetAggregationBits() == nil {
return errors.New("attestation's bitfield can't be nil")
}
return nil
@@ -67,6 +67,12 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation ethpb.Att) bool {
return attestation.GetAggregationBits().Count() > 1
}
// ComputeSubnetForAttestation returns the subnet for which the provided attestation will be broadcasted to.
// This differs from the spec definition by instead passing in the active validators indices in the attestation's
// given epoch.

View File

@@ -308,16 +308,6 @@ func TestValidateNilAttestation(t *testing.T) {
},
errString: "",
},
{
name: "single attestation",
attestation: &ethpb.SingleAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{},
Source: &ethpb.Checkpoint{},
},
},
errString: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -3,7 +3,6 @@ package helpers
import (
"bytes"
"context"
"encoding/binary"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -12,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -349,33 +347,27 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
// Spec pseudocode definition:
//
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_VALUE = 2**16 - 1 # [Modified in Electra]
// i = uint64(0)
// total = uint64(len(indices))
// while True:
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
// # [Modified in Electra]
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
// effective_balance = state.validators[candidate_index].effective_balance
// # [Modified in Electra:EIP7251]
// if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
// return candidate_index
// i += 1
// """
// Return from ``indices`` a random index sampled by effective balance.
// """
// assert len(indices) > 0
// MAX_RANDOM_BYTE = 2**8 - 1
// i = uint64(0)
// total = uint64(len(indices))
// while True:
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
// effective_balance = state.validators[candidate_index].effective_balance
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte: #[Modified in Electra:EIP7251]
// return candidate_index
// i += 1
func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
length := uint64(len(activeIndices))
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hash.CustomSHA256Hasher()
beaconConfig := params.BeaconConfig()
seedBuffer := make([]byte, len(seed)+8)
copy(seedBuffer, seed[:])
for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
@@ -386,28 +378,21 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
if uint64(candidateIndex) >= uint64(bState.NumValidators()) {
return 0, errors.New("active index out of range")
}
b := append(seed[:], bytesutil.Bytes8(i/32)...)
randomByte := hashFunc(b)[i%32]
v, err := bState.ValidatorAtIndexReadOnly(candidateIndex)
if err != nil {
return 0, err
}
effectiveBal := v.EffectiveBalance()
maxEB := params.BeaconConfig().MaxEffectiveBalance
if bState.Version() >= version.Electra {
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/16)
randomByte := hashFunc(seedBuffer)
offset := (i % 16) * 2
randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8
maxEB = params.BeaconConfig().MaxEffectiveBalanceElectra
}
if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
return candidateIndex, nil
}
} else {
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
randomByte := hashFunc(seedBuffer)[i%32]
if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
return candidateIndex, nil
}
}
}
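The loop above is rejection sampling: a candidate with effective balance EB is accepted with probability roughly EB / MAX_EB, so proposer duty is proportional to stake. A self-contained, hypothetical check of the acceptance condition for a 32 ETH validator under the 2048 ETH Electra cap:
package main

import "fmt"

// accepted mirrors the spec condition: EB * MAX_RANDOM_VALUE >= MAX_EB * random_value.
func accepted(effectiveBalance, maxEB, randomValue, maxRandomValue uint64) bool {
	return effectiveBalance*maxRandomValue >= maxEB*randomValue
}

func main() {
	const eb = uint64(32_000_000_000)       // 32 ETH in Gwei
	const maxEB = uint64(2_048_000_000_000) // 2048 ETH in Gwei
	// Random values 0..1023 pass and 1024..65535 fail: exactly 1024/65536 = 32/2048.
	fmt.Println(accepted(eb, maxEB, 1023, 65535)) // true
	fmt.Println(accepted(eb, maxEB, 1024, 65535)) // false
}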

View File

@@ -841,6 +841,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
if length == 0 {
return 0, errors.New("empty active indices list")
}
maxRandomByte := uint64(1<<8 - 1)
hashFunc := hash.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
@@ -859,7 +860,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
if v != nil {
effectiveBal = v.EffectiveBalance
}
if effectiveBal*fieldparams.MaxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
return candidateIndex, nil
}
}

View File

@@ -24,8 +24,6 @@ import (
"google.golang.org/protobuf/proto"
)
const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than required"
func NewLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
@@ -86,12 +84,7 @@ func NewLightClientUpdateFromBeaconState(
return nil, errors.Wrap(err, "could not get sync aggregate")
}
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
return nil, fmt.Errorf(
"%s (got %d, need %d)",
ErrNotEnoughSyncCommitteeBits,
syncAggregate.SyncCommitteeBits.Count(),
params.BeaconConfig().MinSyncCommitteeParticipants,
)
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
}
// assert state.slot == state.latest_block_header.slot
@@ -289,247 +282,56 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
m = &pb.LightClientUpdateAltair{
AttestedHeader: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Beacon: &pb.BeaconBlockHeader{},
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
m = &pb.LightClientUpdateCapella{
AttestedHeader: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
},
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderCapella{},
ExecutionBranch: executionBranch,
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
},
ExecutionBranch: executionBranch,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
m = &pb.LightClientUpdateDeneb{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
} else {
if attestedState.Version() >= version.Electra {
m = &pb.LightClientUpdateElectra{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
} else {
m = &pb.LightClientUpdateDeneb{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
}
}

View File

@@ -217,7 +217,6 @@ func TestSlashValidator_OK(t *testing.T) {
}
func TestSlashValidator_Electra(t *testing.T) {
helpers.ClearCache()
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)

View File

@@ -13,6 +13,7 @@ go_library(
deps = [
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -34,6 +35,7 @@ go_test(
deps = [
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -83,10 +83,10 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
blockCommitments, err := commitmentsToCheck(b, current)
if err != nil {
return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
return errors.Wrapf(err, "could check data availability for block %#x", b.Root())
}
// Return early for blocks that are pre-deneb or which do not have any commitments.
if len(blockCommitments) == 0 {
if blockCommitments.count() == 0 {
return nil
}
@@ -106,7 +106,7 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
sidecars, err := entry.filter(root, blockCommitments, b.Block().Slot())
sidecars, err := entry.filter(root, blockCommitments)
if err != nil {
return errors.Wrap(err, "incomplete BlobSidecar batch")
}
@@ -137,28 +137,22 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
return nil
}
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) ([][]byte, error) {
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentArray, error) {
var ar safeCommitmentArray
if b.Version() < version.Deneb {
return nil, nil
return ar, nil
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
return nil, nil
return ar, nil
}
kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
kc, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, err
return ar, err
}
maxBlobCount := params.BeaconConfig().MaxBlobsPerBlock(b.Block().Slot())
if len(kzgCommitments) > maxBlobCount {
return nil, errIndexOutOfBounds
if len(kc) > len(ar) {
return ar, errIndexOutOfBounds
}
result := make([][]byte, len(kzgCommitments))
copy(result, kzgCommitments)
return result, nil
copy(ar[:], kc)
return ar, nil
}

View File

@@ -8,6 +8,7 @@ import (
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -88,7 +89,7 @@ func Test_commitmentsToCheck(t *testing.T) {
require.NoError(t, err)
c, err := rb.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, true, len(c) > params.BeaconConfig().MaxBlobsPerBlock(sb.Block().Slot()))
require.Equal(t, true, len(c) > fieldparams.MaxBlobsPerBlock)
return rb
},
slot: windowSlots + 1,
@@ -104,7 +105,7 @@ func Test_commitmentsToCheck(t *testing.T) {
} else {
require.NoError(t, err)
}
require.Equal(t, len(c.commits), len(co))
require.Equal(t, len(c.commits), co.count())
for i := 0; i < len(c.commits); i++ {
require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -60,7 +60,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs []*blocks.ROBlob
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
diskSummary filesystem.BlobStorageSummary
}
@@ -72,13 +72,9 @@ func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
// Only the first BlobSidecar of a given Index will be kept in the cache.
// stash will return an error if the given blob is already in the cache, or if the Index is out of bounds.
func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(sc.Slot())
if sc.Index >= uint64(maxBlobsPerBlock) {
if sc.Index >= fieldparams.MaxBlobsPerBlock {
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.Index)
}
if e.scs == nil {
e.scs = make([]*blocks.ROBlob, maxBlobsPerBlock)
}
if e.scs[sc.Index] != nil {
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment)
}
@@ -92,13 +88,12 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([]blocks.ROBlob, error) {
count := len(kc)
if e.diskSummary.AllAvailable(count) {
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.ROBlob, 0, count)
for i := uint64(0); i < uint64(count); i++ {
scs := make([]blocks.ROBlob, 0, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
// We already have this blob, we don't need to write it or validate it.
if e.diskSummary.HasIndex(i) {
continue
@@ -121,3 +116,16 @@ func (e *cacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([
return scs, nil
}
// safeCommitmentArray is a fixed-size array of commitment byte slices. This is helpful for avoiding
// gratuitous bounds checks.
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
func (s safeCommitmentArray) count() int {
for i := range s {
if s[i] == nil {
return i
}
}
return fieldparams.MaxBlobsPerBlock
}
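count assumes the commitments are packed from index 0 with no interior nils, which commitmentsToCheck guarantees by copying a contiguous slice into the array. A hypothetical usage:
var ar safeCommitmentArray
ar[0] = []byte("c0") // placeholders; real entries are 48-byte KZG commitments
ar[1] = []byte("c1")
n := ar.count() // 2: the index of the first nil entry
// A completely full array returns fieldparams.MaxBlobsPerBlock instead.
_ = n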

View File

@@ -29,10 +29,10 @@ func TestCacheEnsureDelete(t *testing.T) {
require.Equal(t, nilEntry, c.entries[k])
}
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob)
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
return func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
require.NoError(t, err)
@@ -44,7 +44,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry.setDiskSummary(sum)
}
expected := make([]blocks.ROBlob, 0, nBlobs)
for i := 0; i < len(commits); i++ {
for i := 0; i < commits.count(); i++ {
if entry.diskSummary.HasIndex(uint64(i)) {
continue
}
@@ -113,7 +113,7 @@ func TestFilterDiskSummary(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits, 100)
got, err := entry.filter([32]byte{}, commits)
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
@@ -125,12 +125,12 @@ func TestFilter(t *testing.T) {
require.NoError(t, err)
cases := []struct {
name string
setup func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob)
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
err error
}{
{
name: "commitments mismatch - extra sidecar",
setup: func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
commits[5] = nil
return entry, commits, expected
@@ -139,7 +139,7 @@ func TestFilter(t *testing.T) {
},
{
name: "sidecar missing",
setup: func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5] = nil
return entry, commits, expected
@@ -148,7 +148,7 @@ func TestFilter(t *testing.T) {
},
{
name: "commitments mismatch - different bytes",
setup: func(t *testing.T) (*cacheEntry, [][]byte, []blocks.ROBlob) {
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5].KzgCommitment = []byte("nope")
return entry, commits, expected
@@ -160,7 +160,7 @@ func TestFilter(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits, 100)
got, err := entry.filter([32]byte{}, commits)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return

View File

@@ -42,7 +42,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/verification:go_default_library",
"//config/params:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -13,7 +13,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -25,7 +25,7 @@ import (
)
var (
errIndexOutOfBounds = errors.New("blob index in file name >= DeprecatedMaxBlobsPerBlock")
errIndexOutOfBounds = errors.New("blob index in file name >= MaxBlobsPerBlock")
errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
errNoBasePath = errors.New("BlobStorage base path not specified in init")
@@ -109,11 +109,10 @@ func (bs *BlobStorage) WarmCache() {
}
go func() {
start := time.Now()
log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
if err := bs.pruner.warmCache(); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}()
}
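The hunk above drops the pre-warm-up log line; the surrounding pattern, warming the pruner cache in a goroutine and logging the elapsed time, is unchanged. A minimal sketch of that timed background warm-up, with the standard library logger standing in for Prysm's logrus fields:

```go
package main

import (
	"log"
	"time"
)

// warmCache stands in for the pruner's cache warm-up.
func warmCache() error {
	time.Sleep(50 * time.Millisecond)
	return nil
}

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		start := time.Now()
		if err := warmCache(); err != nil {
			log.Printf("warm-up failed: %v", err)
			return
		}
		log.Printf("warm-up complete elapsed=%s", time.Since(start))
	}()
	<-done // the real method returns immediately; we wait only so the example exits cleanly
}
```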
@@ -219,7 +218,6 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
partialMoved = true
blobsWrittenCounter.Inc()
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
@@ -257,10 +255,8 @@ func (bs *BlobStorage) Remove(root [32]byte) error {
// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
// This value can be compared to the commitments observed in a block to determine which indices need to be found
// on the network to confirm data availability.
func (bs *BlobStorage) Indices(root [32]byte, s primitives.Slot) ([]bool, error) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s)
mask := make([]bool, maxBlobsPerBlock)
func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]bool, error) {
var mask [fieldparams.MaxBlobsPerBlock]bool
rootDir := blobNamer{root: root}.dir()
entries, err := afero.ReadDir(bs.fs, rootDir)
if err != nil {
@@ -269,7 +265,6 @@ func (bs *BlobStorage) Indices(root [32]byte, s primitives.Slot) ([]bool, error)
}
return mask, err
}
for i := range entries {
if entries[i].IsDir() {
continue
@@ -286,7 +281,7 @@ func (bs *BlobStorage) Indices(root [32]byte, s primitives.Slot) ([]bool, error)
if err != nil {
return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
}
if u >= uint64(maxBlobsPerBlock) {
if u >= fieldparams.MaxBlobsPerBlock {
return mask, errIndexOutOfBounds
}
mask[u] = true
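With `Indices` now returning a fixed `[fieldparams.MaxBlobsPerBlock]bool` array rather than a slot-sized slice, callers compare a constant-length presence mask against a block's commitments. A hedged sketch of that comparison, with a local constant standing in for `fieldparams.MaxBlobsPerBlock`:

```go
package main

import "fmt"

const maxBlobsPerBlock = 6 // stand-in for fieldparams.MaxBlobsPerBlock (Deneb mainnet value)

// missingIndices compares the on-disk presence mask against the number of
// commitments in a block and returns the indices still needed from the network.
func missingIndices(mask [maxBlobsPerBlock]bool, commitmentCount int) []uint64 {
	missing := make([]uint64, 0, maxBlobsPerBlock)
	for i := 0; i < commitmentCount && i < maxBlobsPerBlock; i++ {
		if !mask[i] {
			missing = append(missing, uint64(i))
		}
	}
	return missing
}

func main() {
	var mask [maxBlobsPerBlock]bool
	mask[0], mask[2] = true, true // sidecars 0 and 2 already on disk
	fmt.Println(missingIndices(mask, 4)) // [1 3]
}
```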

View File

@@ -10,7 +10,7 @@ import (
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -20,7 +20,7 @@ import (
)
func TestBlobStorage_SaveBlobData(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -56,10 +56,10 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
require.NoError(t, bs.Save(sc))
actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
require.NoError(t, err)
expectedIdx := []bool{false, false, true, false, false, false}
actualIdx, err := bs.Indices(actualSc.BlockRoot(), 100)
expectedIdx := [fieldparams.MaxBlobsPerBlock]bool{false, false, true}
actualIdx, err := bs.Indices(actualSc.BlockRoot())
require.NoError(t, err)
require.DeepEqual(t, expectedIdx, actualIdx)
require.Equal(t, expectedIdx, actualIdx)
})
t.Run("round trip write then read", func(t *testing.T) {
@@ -132,19 +132,19 @@ func TestBlobIndicesBounds(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
root := [32]byte{}
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
writeFakeSSZ(t, fs, root, okIdx)
indices, err := bs.Indices(root, 100)
indices, err := bs.Indices(root)
require.NoError(t, err)
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
var expected [fieldparams.MaxBlobsPerBlock]bool
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
oobIdx := uint64(fieldparams.MaxBlobsPerBlock)
writeFakeSSZ(t, fs, root, oobIdx)
_, err = bs.Indices(root, 100)
_, err = bs.Indices(root)
require.ErrorIs(t, err, errIndexOutOfBounds)
}
@@ -163,7 +163,7 @@ func TestBlobStoragePrune(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
t.Run("PruneOne", func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, params.BeaconConfig().MaxBlobsPerBlock(0))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -178,7 +178,7 @@ func TestBlobStoragePrune(t *testing.T) {
require.Equal(t, 0, len(remainingFolders))
})
t.Run("Prune dangling blob", func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, params.BeaconConfig().MaxBlobsPerBlock(0))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -198,7 +198,7 @@ func TestBlobStoragePrune(t *testing.T) {
for j := 0; j <= blockQty; j++ {
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, params.BeaconConfig().MaxBlobsPerBlock(0))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
require.NoError(t, bs.Save(testSidecars[0]))
@@ -224,7 +224,7 @@ func BenchmarkPruning(b *testing.B) {
for j := 0; j <= blockQty; j++ {
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, params.BeaconConfig().MaxBlobsPerBlock(0))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
require.NoError(t, bs.Save(testSidecars[0]))

View File

@@ -9,7 +9,7 @@ import (
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask []bool
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
@@ -20,11 +20,7 @@ type BlobStorageSummary struct {
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s.slot)
if idx >= uint64(maxBlobsPerBlock) {
return false
}
if idx >= uint64(len(s.mask)) {
if idx >= fieldparams.MaxBlobsPerBlock {
return false
}
return s.mask[idx]
@@ -32,11 +28,7 @@ func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s.slot)
if count > maxBlobsPerBlock {
return false
}
if count > len(s.mask) {
if count > fieldparams.MaxBlobsPerBlock {
return false
}
for i := 0; i < count; i++ {
@@ -76,17 +68,13 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
}
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]
v.slot = slot
if v.mask == nil {
v.mask = make(blobIndexMask, maxBlobsPerBlock)
}
if !v.mask[idx] {
s.updateMetrics(1)
}
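`AllAvailable` now bounds its check by the constant maximum instead of a per-slot lookup. A self-contained sketch of the same logic, with a stand-in constant:

```go
package main

import "fmt"

const maxBlobsPerBlock = 6 // stand-in for fieldparams.MaxBlobsPerBlock

// allAvailable mirrors BlobStorageSummary.AllAvailable after this change:
// true only when every index in [0, count) is present in the fixed-size mask.
func allAvailable(mask [maxBlobsPerBlock]bool, count int) bool {
	if count > maxBlobsPerBlock {
		return false // a block can never commit to more blobs than the maximum
	}
	for i := 0; i < count; i++ {
		if !mask[i] {
			return false
		}
	}
	return true
}

func main() {
	var mask [maxBlobsPerBlock]bool
	mask[0], mask[1] = true, true
	fmt.Println(allAvailable(mask, 2)) // true
	fmt.Println(allAvailable(mask, 3)) // false: index 2 missing
}
```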

View File

@@ -3,17 +3,13 @@ package filesystem
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/config/params"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSlotByRoot_Summary(t *testing.T) {
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
@@ -23,49 +19,49 @@ func TestSlotByRoot_Summary(t *testing.T) {
cases := []struct {
name string
root [32]byte
expected blobIndexMask
expected *blobIndexMask
}{
{
name: "not found",
},
{
name: "none set",
expected: noneSet,
expected: &noneSet,
},
{
name: "index 1 set",
expected: oneSet,
expected: &oneSet,
},
{
name: "all set",
expected: allSet,
expected: &allSet,
},
{
name: "first set",
expected: firstSet,
expected: &firstSet,
},
{
name: "last set",
expected: lastSet,
expected: &lastSet,
},
}
sc := newBlobStorageCache()
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
sc.cache[key] = BlobStorageSummary{slot: 0, mask: c.expected}
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
key := bytesutil.ToBytes32([]byte(c.name))
sum := sc.Summary(key)
for i, has := range c.expected {
for i := range c.expected {
ui := uint64(i)
if c.expected == nil {
require.Equal(t, false, sum.HasIndex(ui))
} else {
require.Equal(t, has, sum.HasIndex(ui))
require.Equal(t, c.expected[i], sum.HasIndex(ui))
}
}
})
@@ -125,13 +121,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
count: fieldparams.MaxBlobsPerBlock + 1,
aa: false,
},
{
name: "max present",
count: params.BeaconConfig().MaxBlobsPerBlock(0),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
count: fieldparams.MaxBlobsPerBlock,
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
aa: true,
},
{
@@ -143,7 +139,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
var mask blobIndexMask
for _, idx := range c.idxSet {
mask[idx] = true
}

View File

@@ -12,7 +12,7 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -25,7 +25,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
pr, err := newBlobPruner(fs, 0)
require.NoError(t, err)
slot := pr.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, params.BeaconConfig().MaxBlobsPerBlock(slot))
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, fieldparams.MaxBlobsPerBlock)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
rootStr := rootString(sc.BlockRoot())

View File

@@ -539,231 +539,3 @@ func createDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
return light_client.NewWrappedUpdate(m)
}
func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(cfg)
db := setupDB(t)
ctx := context.Background()
t.Run("Nil", func(t *testing.T) {
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("NilBlockRoot"))
require.NoError(t, err)
require.IsNil(t, retrievedBootstrap)
})
t.Run("Altair", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
})
t.Run("Capella", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().CapellaForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootCapella"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootCapella"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
})
t.Run("Deneb", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().DenebForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootDeneb"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootDeneb"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
})
t.Run("Electra", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().ElectraForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootElectra"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootElectra"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
})
}
func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.LightClientBootstrap, error) {
currentEpoch := slots.ToEpoch(currentSlot)
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
for i := uint64(0); i < syncCommitteeSize; i++ {
pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
currentSyncCommittee := &pb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
}
var currentSyncCommitteeBranch [][]byte
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
} else {
currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)
}
for i := 0; i < len(currentSyncCommitteeBranch); i++ {
currentSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
}
executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
executionBranch[i] = make([]byte, 32)
}
// TODO: can this be based on the current epoch?
var m proto.Message
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
m = &pb.LightClientBootstrapAltair{
Header: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
m = &pb.LightClientBootstrapCapella{
Header: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
m = &pb.LightClientBootstrapDeneb{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else {
m = &pb.LightClientBootstrapElectra{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
}
return light_client.NewWrappedBootstrap(m)
}
func createRandomSyncCommittee() *pb.SyncCommittee {
// random number between 2 and 128
base := rand.Int()%127 + 2
syncCom := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
for i := 0; uint64(i) < params.BeaconConfig().SyncCommitteeSize; i++ {
if i%base == 0 {
syncCom[i] = make([]byte, fieldparams.BLSPubkeyLength)
syncCom[i][0] = 1
continue
}
syncCom[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
return &pb.SyncCommittee{
Pubkeys: syncCom,
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
}
}

View File

@@ -2,10 +2,8 @@ package kv
import (
"context"
"crypto/rand"
"encoding/binary"
mathRand "math/rand"
"math/rand"
"strconv"
"testing"
"time"
@@ -880,16 +878,16 @@ func validators(limit int) []*ethpb.Validator {
var vals []*ethpb.Validator
for i := 0; i < limit; i++ {
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
binary.LittleEndian.PutUint64(pubKey, mathRand.Uint64())
binary.LittleEndian.PutUint64(pubKey, rand.Uint64())
val := &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: bytesutil.ToBytes(mathRand.Uint64(), 32),
EffectiveBalance: mathRand.Uint64(),
WithdrawalCredentials: bytesutil.ToBytes(rand.Uint64(), 32),
EffectiveBalance: rand.Uint64(),
Slashed: i%2 != 0,
ActivationEligibilityEpoch: primitives.Epoch(mathRand.Uint64()),
ActivationEpoch: primitives.Epoch(mathRand.Uint64()),
ExitEpoch: primitives.Epoch(mathRand.Uint64()),
WithdrawableEpoch: primitives.Epoch(mathRand.Uint64()),
ActivationEligibilityEpoch: primitives.Epoch(rand.Uint64()),
ActivationEpoch: primitives.Epoch(rand.Uint64()),
ExitEpoch: primitives.Epoch(rand.Uint64()),
WithdrawableEpoch: primitives.Epoch(rand.Uint64()),
}
vals = append(vals, val)
}
@@ -915,8 +913,8 @@ func checkStateSaveTime(b *testing.B, saveCount int) {
allValidators := append(initialSetOfValidators, validatosToAddInTest...)
// shuffle validators.
mathRand.New(mathRand.NewSource(time.Now().UnixNano()))
mathRand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
require.NoError(b, st.SetValidators(allValidators))
require.NoError(b, db.SaveState(context.Background(), st, bytesutil.ToBytes32(key)))
@@ -961,8 +959,8 @@ func checkStateReadTime(b *testing.B, saveCount int) {
allValidators := append(initialSetOfValidators, validatosToAddInTest...)
// shuffle validators.
mathRand.New(mathRand.NewSource(time.Now().UnixNano()))
mathRand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] })
require.NoError(b, st.SetValidators(allValidators))
require.NoError(b, db.SaveState(context.Background(), st, bytesutil.ToBytes32(key)))
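Two notes on the shuffle lines above: the pre-change `mathRand.New(mathRand.NewSource(...))` discarded the constructed `*rand.Rand`, so the following `Shuffle` hit the global source anyway, and the replacement `rand.Seed` has been deprecated since Go 1.20. A local source avoids both, as a sketch:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	vals := []int{1, 2, 3, 4, 5}
	// A local *rand.Rand avoids the deprecated rand.Seed and the discarded-Rand
	// pitfall; a fixed seed also makes benchmark runs reproducible.
	r := rand.New(rand.NewSource(42))
	r.Shuffle(len(vals), func(i, j int) { vals[i], vals[j] = vals[j], vals[i] })
	fmt.Println(vals)
}
```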

View File

@@ -136,18 +136,30 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
defer cancel()
result := &pb.PayloadStatus{}
switch payloadPb := payload.Proto().(type) {
switch payload.Proto().(type) {
case *pb.ExecutionPayload:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayload)
if !ok {
return nil, errors.New("execution data must be a Bellatrix or Capella execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payloadPb)
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadCapella:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadCapella)
if !ok {
return nil, errors.New("execution data must be a Capella execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV2, payloadPb)
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
if executionRequests == nil {
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {
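The rewritten switch dispatches on `payload.Proto().(type)` without binding the value and then re-asserts the concrete type inside each case; those `ok` checks are purely defensive, since a case body only runs when the assertion already holds. Go's binding form expresses the same dispatch in one step, as in this sketch with illustrative payload types:

```go
package main

import "fmt"

// payloadV1 and payloadV2 are illustrative stand-ins for the proto payload types.
type payloadV1 struct{ block string }
type payloadV2 struct{ block string }

// dispatch uses the binding form of a type switch: each case variable is
// already the concrete type, so no per-case assertion or ok check is needed.
func dispatch(p interface{}) string {
	switch pb := p.(type) {
	case *payloadV1:
		return "v1:" + pb.block
	case *payloadV2:
		return "v2:" + pb.block
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(dispatch(&payloadV1{block: "0xabc"})) // v1:0xabc
	fmt.Println(dispatch(&payloadV2{block: "0xdef"})) // v2:0xdef
}
```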

View File

@@ -93,7 +93,6 @@ type BeaconNode struct {
stop chan struct{} // Channel to wait for termination notifications.
db db.Database
slasherDB db.SlasherDatabase
attestationCache *cache.AttestationCache
attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
@@ -145,7 +144,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
stateFeed: new(event.Feed),
blockFeed: new(event.Feed),
opFeed: new(event.Feed),
attestationCache: cache.NewAttestationCache(),
attestationPool: attestations.NewPool(),
exitPool: voluntaryexits.NewPool(),
slashingsPool: slashings.NewPool(),
@@ -706,7 +704,6 @@ func (b *BeaconNode) fetchBuilderService() *builder.Service {
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(b.ctx, &attestations.Config{
Cache: b.attestationCache,
Pool: b.attestationPool,
InitialSyncComplete: b.initialSyncComplete,
})
@@ -735,7 +732,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithDepositCache(b.depositCache),
blockchain.WithChainStartFetcher(web3Service),
blockchain.WithExecutionEngineCaller(web3Service),
blockchain.WithAttestationCache(b.attestationCache),
blockchain.WithAttestationPool(b.attestationPool),
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
@@ -820,7 +816,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithBlockNotifier(b),
regularsync.WithAttestationNotifier(b),
regularsync.WithOperationNotifier(b),
regularsync.WithAttestationCache(b.attestationCache),
regularsync.WithAttestationPool(b.attestationPool),
regularsync.WithExitPool(b.exitPool),
regularsync.WithSlashingPool(b.slashingsPool),
@@ -957,7 +952,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationCache: b.attestationCache,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,

View File

@@ -16,7 +16,6 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",

View File

@@ -1,13 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["map.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap",
visibility = ["//visibility:public"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -1,89 +0,0 @@
package attmap
import (
"sync"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
// Attestations --
type Attestations struct {
atts map[attestation.Id]ethpb.Att
sync.RWMutex
}
// New creates a new instance of the map.
func New() *Attestations {
return &Attestations{atts: make(map[attestation.Id]ethpb.Att)}
}
// Save stores an attestation in the map.
func (a *Attestations) Save(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
a.Lock()
defer a.Unlock()
a.atts[id] = att
return nil
}
// SaveMany stores multiple attestations in the map.
func (a *Attestations) SaveMany(atts []ethpb.Att) error {
for _, att := range atts {
if err := a.Save(att); err != nil {
return err
}
}
return nil
}
// GetAll retrieves all attestations that are in the map.
func (a *Attestations) GetAll() []ethpb.Att {
a.RLock()
defer a.RUnlock()
atts := make([]ethpb.Att, len(a.atts))
i := 0
for _, att := range a.atts {
atts[i] = att.Clone()
i++
}
return atts
}
// Delete removes an attestation from the map.
func (a *Attestations) Delete(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
a.Lock()
defer a.Unlock()
delete(a.atts, id)
return nil
}
// Count returns the number of attestations in the map.
func (a *Attestations) Count() int {
a.RLock()
defer a.RUnlock()
return len(a.atts)
}
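The deleted `attmap` package is a small instance of a common Go shape: a map keyed by a derived ID, guarded by an embedded `sync.RWMutex`, handing back clones so callers cannot mutate cached entries. A self-contained sketch of that shape, with strings and byte slices standing in for `attestation.Id` and `ethpb.Att`:

```go
package main

import (
	"fmt"
	"sync"
)

// idMap is a minimal sketch of the removed attmap pattern.
type idMap struct {
	sync.RWMutex
	items map[string][]byte
}

func newIDMap() *idMap { return &idMap{items: make(map[string][]byte)} }

// save stores a value under its derived ID.
func (m *idMap) save(id string, v []byte) {
	m.Lock()
	defer m.Unlock()
	m.items[id] = v
}

// getAll returns clones of every stored value, like att.Clone() in the original.
func (m *idMap) getAll() [][]byte {
	m.RLock()
	defer m.RUnlock()
	out := make([][]byte, 0, len(m.items))
	for _, v := range m.items {
		out = append(out, append([]byte(nil), v...))
	}
	return out
}

func main() {
	m := newIDMap()
	m.save("att-1", []byte{0x01})
	fmt.Println(len(m.getAll())) // 1
}
```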

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"aggregated.go",
"block.go",
"forkchoice.go",
"kv.go",
"seen_bits.go",
"unaggregated.go",
@@ -13,7 +14,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/operations/attestations/attmap:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -88,7 +88,7 @@ func (c *AttCaches) aggregateParallel(atts map[attestation.Id][]ethpb.Att, leftO
log.Error("nil aggregated attestation")
continue
}
if aggregated.IsAggregated() {
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]ethpb.Att{aggregated}); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
continue
@@ -122,7 +122,7 @@ func (c *AttCaches) SaveAggregatedAttestation(att ethpb.Att) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
if !att.IsAggregated() {
if !helpers.IsAggregated(att) {
return errors.New("attestation is not aggregated")
}
has, err := c.HasAggregatedAttestation(att)
@@ -255,7 +255,7 @@ func (c *AttCaches) DeleteAggregatedAttestation(att ethpb.Att) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
if !att.IsAggregated() {
if !helpers.IsAggregated(att) {
return errors.New("attestation is not aggregated")
}
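These call sites trade the interface method `att.IsAggregated()` for the package-level `helpers.IsAggregated(att)`. The usual definition behind both is that more than one attester bit is set. A sketch over a plain byte slice; Prysm's helper actually works on a go-bitfield `Bitlist`, which carries a length bit this sketch ignores:

```go
package main

import (
	"fmt"
	"math/bits"
)

// isAggregated reports whether more than one attester bit is set.
func isAggregated(aggregationBits []byte) bool {
	count := 0
	for _, b := range aggregationBits {
		count += bits.OnesCount8(b)
	}
	return count > 1
}

func main() {
	fmt.Println(isAggregated([]byte{0b00000001})) // false: single attester
	fmt.Println(isAggregated([]byte{0b00000101})) // true: two attesters
}
```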

View File

@@ -0,0 +1,74 @@
package kv
import (
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
)
// SaveForkchoiceAttestation saves a forkchoice attestation in cache.
func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
c.forkchoiceAtt[id] = att
return nil
}
// SaveForkchoiceAttestations saves a list of forkchoice attestations in cache.
func (c *AttCaches) SaveForkchoiceAttestations(atts []ethpb.Att) error {
for _, att := range atts {
if err := c.SaveForkchoiceAttestation(att); err != nil {
return err
}
}
return nil
}
// ForkchoiceAttestations returns the forkchoice attestations in cache.
func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
c.forkchoiceAttLock.RLock()
defer c.forkchoiceAttLock.RUnlock()
atts := make([]ethpb.Att, 0, len(c.forkchoiceAtt))
for _, att := range c.forkchoiceAtt {
atts = append(atts, att.Clone())
}
return atts
}
// DeleteForkchoiceAttestation deletes a forkchoice attestation in cache.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
id, err := attestation.NewId(att, attestation.Full)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")
}
c.forkchoiceAttLock.Lock()
defer c.forkchoiceAttLock.Unlock()
delete(c.forkchoiceAtt, id)
return nil
}
// ForkchoiceAttestationCount returns the number of forkchoice attestation keys in the pool.
func (c *AttCaches) ForkchoiceAttestationCount() int {
c.forkchoiceAttLock.RLock()
defer c.forkchoiceAttLock.RUnlock()
return len(c.forkchoiceAtt)
}

View File

@@ -20,7 +20,7 @@ func TestKV_Forkchoice_CanSaveRetrieve(t *testing.T) {
atts := []ethpb.Att{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.saveForkchoiceAttestation(att))
require.NoError(t, cache.SaveForkchoiceAttestation(att))
}
returned := cache.ForkchoiceAttestations()
@@ -41,7 +41,7 @@ func TestKV_Forkchoice_CanDelete(t *testing.T) {
atts := []ethpb.Att{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.saveForkchoiceAttestation(att))
require.NoError(t, cache.SaveForkchoiceAttestation(att))
}
require.NoError(t, cache.DeleteForkchoiceAttestation(att1))
@@ -61,7 +61,7 @@ func TestKV_Forkchoice_CanCount(t *testing.T) {
atts := []*ethpb.Attestation{att1, att2, att3}
for _, att := range atts {
require.NoError(t, cache.saveForkchoiceAttestation(att))
require.NoError(t, cache.SaveForkchoiceAttestation(att))
}
require.Equal(t, 3, cache.ForkchoiceAttestationCount())

View File

@@ -8,7 +8,6 @@ import (
"time"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap"
"github.com/prysmaticlabs/prysm/v5/config/params"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
@@ -22,7 +21,8 @@ type AttCaches struct {
aggregatedAtt map[attestation.Id][]ethpb.Att
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[attestation.Id]ethpb.Att
forkchoiceAtt *attmap.Attestations
forkchoiceAttLock sync.RWMutex
forkchoiceAtt map[attestation.Id]ethpb.Att
blockAttLock sync.RWMutex
blockAtt map[attestation.Id][]ethpb.Att
seenAtt *cache.Cache
@@ -36,35 +36,10 @@ func NewAttCaches() *AttCaches {
pool := &AttCaches{
unAggregatedAtt: make(map[attestation.Id]ethpb.Att),
aggregatedAtt: make(map[attestation.Id][]ethpb.Att),
forkchoiceAtt: attmap.New(),
forkchoiceAtt: make(map[attestation.Id]ethpb.Att),
blockAtt: make(map[attestation.Id][]ethpb.Att),
seenAtt: c,
}
return pool
}
// saveForkchoiceAttestation saves a forkchoice attestation.
func (c *AttCaches) saveForkchoiceAttestation(att ethpb.Att) error {
return c.forkchoiceAtt.Save(att)
}
// SaveForkchoiceAttestations saves forkchoice attestations.
func (c *AttCaches) SaveForkchoiceAttestations(att []ethpb.Att) error {
return c.forkchoiceAtt.SaveMany(att)
}
// ForkchoiceAttestations returns all forkchoice attestations.
func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {
return c.forkchoiceAtt.GetAll()
}
// DeleteForkchoiceAttestation deletes a forkchoice attestation.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
return c.forkchoiceAtt.Delete(att)
}
// ForkchoiceAttestationCount returns the number of forkchoice attestation keys.
func (c *AttCaches) ForkchoiceAttestationCount() int {
return c.forkchoiceAtt.Count()
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -16,7 +17,7 @@ func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
if att.IsAggregated() {
if helpers.IsAggregated(att) {
return errors.New("attestation is aggregated")
}
@@ -132,7 +133,8 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
return nil
}
if att.IsAggregated() {
if helpers.IsAggregated(att) {
return errors.New("attestation is aggregated")
}
@@ -160,7 +162,7 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
count := 0
for r, att := range c.unAggregatedAtt {
if att == nil || att.IsNil() || att.IsAggregated() {
if att == nil || att.IsNil() || helpers.IsAggregated(att) {
continue
}
if seen, err := c.hasSeenBit(att); err == nil && seen {

View File

@@ -30,16 +30,6 @@ var (
Name: "expired_block_atts_total",
Help: "The number of expired and deleted block attestations in the pool.",
})
attCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "attestations_in_pool_total",
Help: "The number of attestations in the pool.",
},
)
expiredAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_atts_total",
Help: "The number of expired and deleted attestations in the pool.",
})
batchForkChoiceAttsT1 = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "aggregate_attestations_t1",
@@ -60,8 +50,3 @@ func (s *Service) updateMetrics() {
aggregatedAttsCount.Set(float64(s.cfg.Pool.AggregatedAttestationCount()))
unaggregatedAttsCount.Set(float64(s.cfg.Pool.UnaggregatedAttestationCount()))
}
func (s *Service) updateMetricsExperimental(numExpired uint64) {
attCount.Set(float64(s.cfg.Cache.Count()))
expiredAtts.Add(float64(numExpired))
}

View File

@@ -37,6 +37,7 @@ type Pool interface {
BlockAttestations() []ethpb.Att
DeleteBlockAttestation(att ethpb.Att) error
// For attestations to be passed to fork choice.
SaveForkchoiceAttestation(att ethpb.Att) error
SaveForkchoiceAttestations(atts []ethpb.Att) error
ForkchoiceAttestations() []ethpb.Att
DeleteForkchoiceAttestation(att ethpb.Att) error

View File

@@ -61,16 +61,11 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "Operations.attestations.batchForkChoiceAtts")
defer span.End()
var atts []ethpb.Att
if features.Get().EnableExperimentalAttestationPool {
atts = append(s.cfg.Cache.GetAll(), s.cfg.Cache.ForkchoiceAttestations()...)
} else {
if err := s.cfg.Pool.AggregateUnaggregatedAttestations(ctx); err != nil {
return err
}
atts = append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)
if err := s.cfg.Pool.AggregateUnaggregatedAttestations(ctx); err != nil {
return err
}
atts := append(s.cfg.Pool.AggregatedAttestations(), s.cfg.Pool.BlockAttestations()...)
atts = append(atts, s.cfg.Pool.ForkchoiceAttestations()...)
attsById := make(map[attestation.Id][]ethpb.Att, len(atts))
@@ -97,11 +92,9 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error {
}
}
if !features.Get().EnableExperimentalAttestationPool {
for _, a := range s.cfg.Pool.BlockAttestations() {
if err := s.cfg.Pool.DeleteBlockAttestation(a); err != nil {
return err
}
for _, a := range s.cfg.Pool.BlockAttestations() {
if err := s.cfg.Pool.DeleteBlockAttestation(a); err != nil {
return err
}
}
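With the experimental-pool branch removed, `batchForkChoiceAtts` always buckets the collected attestations into `attsById` and aggregates each bucket. A sketch of that group-then-aggregate shape, with a string ID and a single byte of bits standing in for `attestation.Id` and real aggregation bits (BLS signature aggregation omitted):

```go
package main

import "fmt"

// att is a stand-in: id plays the role of attestation.Id, bits the aggregation bits.
type att struct {
	id   string
	bits byte
}

// groupByID buckets attestations with identical data so each bucket can be
// aggregated independently, mirroring the attsById map above.
func groupByID(atts []att) map[string][]att {
	grouped := make(map[string][]att, len(atts))
	for _, a := range atts {
		grouped[a.id] = append(grouped[a.id], a)
	}
	return grouped
}

// aggregate ORs the attester bits within one bucket; real aggregation also
// combines the BLS signatures, which this sketch omits.
func aggregate(group []att) att {
	out := att{id: group[0].id}
	for _, a := range group {
		out.bits |= a.bits
	}
	return out
}

func main() {
	atts := []att{{"d1", 0b01}, {"d1", 0b10}, {"d2", 0b01}}
	for id, g := range groupByID(atts) {
		fmt.Printf("%s -> %04b\n", id, aggregate(g).bits)
	}
}
```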

View File

@@ -9,8 +9,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// pruneExpired prunes the attestation pool on every slot interval.
func (s *Service) pruneExpired() {
// pruneAttsPool prunes the attestation pool on every slot interval.
func (s *Service) pruneAttsPool() {
ticker := time.NewTicker(s.cfg.pruneInterval)
defer ticker.Stop()
for {
@@ -25,27 +25,6 @@ func (s *Service) pruneExpired() {
}
}
// pruneExpiredExperimental prunes attestations on every prune interval.
func (s *Service) pruneExpiredExperimental() {
ticker := time.NewTicker(s.cfg.pruneInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
expirySlot, err := s.expirySlot()
if err != nil {
log.WithError(err).Error("Could not get expiry slot")
continue
}
numExpired := s.cfg.Cache.PruneBefore(expirySlot)
s.updateMetricsExperimental(numExpired)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}
// This prunes expired attestations from the pool.
func (s *Service) pruneExpiredAtts() {
aggregatedAtts := s.cfg.Pool.AggregatedAttestations()
@@ -105,17 +84,3 @@ func (s *Service) expiredPreDeneb(slot primitives.Slot) bool {
currentTime := uint64(prysmTime.Now().Unix())
return currentTime >= expirationTime
}
// Attestations for a slot before the returned slot are considered expired.
func (s *Service) expirySlot() (primitives.Slot, error) {
currSlot := slots.CurrentSlot(s.genesisTime)
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == 0 {
return 0, nil
}
if currEpoch < params.BeaconConfig().DenebForkEpoch {
// Safe to subtract because we exited early for epoch 0.
return currSlot - 31, nil
}
return slots.EpochStart(currEpoch - 1)
}
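The removed `expirySlot` helper encodes the pruning rule the experimental pool used: before Deneb an attestation expires one epoch's worth of slots after its own, and from Deneb onward, where EIP-7045 widens the inclusion window, anything before the start of the previous epoch is expired. A worked sketch assuming mainnet's 32 slots per epoch:

```go
package main

import "fmt"

const slotsPerEpoch = 32

// expirySlot mirrors the removed helper: attestations for slots before the
// returned slot are considered expired.
func expirySlot(currSlot, denebForkEpoch uint64) uint64 {
	currEpoch := currSlot / slotsPerEpoch
	if currEpoch == 0 {
		return 0
	}
	if currEpoch < denebForkEpoch {
		return currSlot - (slotsPerEpoch - 1) // pre-Deneb: the original's currSlot - 31
	}
	return (currEpoch - 1) * slotsPerEpoch // post-Deneb: slots.EpochStart(currEpoch - 1)
}

func main() {
	fmt.Println(expirySlot(100, 1000)) // pre-Deneb: 69
	fmt.Println(expirySlot(100, 1))    // post-Deneb: 64 (start of epoch 2)
}
```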

View File

@@ -50,7 +50,7 @@ func TestPruneExpired_Ticker(t *testing.T) {
// Rewind back one epoch worth of time.
s.genesisTime = uint64(prysmTime.Now().Unix()) - uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
go s.pruneExpired()
go s.pruneAttsPool()
done := make(chan struct{}, 1)
async.RunEvery(ctx, 500*time.Millisecond, func() {
@@ -145,4 +145,5 @@ func TestPruneExpired_ExpiredDeneb(t *testing.T) {
assert.Equal(t, true, s.expired(secondEpochStart), "Should be expired")
assert.Equal(t, false, s.expired(thirdEpochStart), "Should not be expired")
}

View File

@@ -9,9 +9,7 @@ import (
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
@@ -29,13 +27,12 @@ type Service struct {
// Config options for the service.
type Config struct {
Cache *cache.AttestationCache
Pool Pool
pruneInterval time.Duration
InitialSyncComplete chan struct{}
}
// NewService instantiates a new attestation service instance that will
// NewService instantiates a new attestation pool service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
cache := lruwrpr.New(forkChoiceProcessedAttsSize)
@@ -61,12 +58,7 @@ func (s *Service) Start() {
return
}
go s.prepareForkChoiceAtts()
if features.Get().EnableExperimentalAttestationPool {
go s.pruneExpiredExperimental()
} else {
go s.pruneExpired()
}
go s.pruneAttsPool()
}
// waitForSync waits until the beacon node is synced to the latest head.

View File

@@ -3,9 +3,8 @@ package p2p
import (
"context"
"crypto/ecdsa"
"crypto/rand"
"fmt"
mathRand "math/rand"
"math/rand"
"net"
"os"
"path"
@@ -49,7 +48,7 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
require.NoError(t, err, "Could not get ip")
ipAddr := net.ParseIP(ip)
temp := t.TempDir()
randNum := mathRand.Int()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
require.NoError(t, os.Mkdir(tempPath, 0700))
pkey, err := privKey(&Config{DataDir: tempPath})

View File

@@ -47,7 +47,7 @@ func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
return gossipMessage(topic)
case AttestationSubnetTopicFormat:
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return &ethpb.SingleAttestation{}
return &ethpb.AttestationElectra{}
}
return gossipMessage(topic)
case AttesterSlashingSubnetTopicFormat:
@@ -101,7 +101,7 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
// Specially handle Electra objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockElectra{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SingleAttestation{})] = AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttestationElectra{})] = AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashingElectra{})] = AttesterSlashingSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedAggregateAttestationAndProofElectra{})] = AggregateAndProofSubnetTopicFormat
}
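The mapping change above is fork-gated dispatch: the same gossip topic decodes to a different message type once the fork epoch is reached. A minimal sketch with illustrative types and an assumed fork epoch:

```go
package main

import "fmt"

// attestation and attestationElectra are illustrative stand-ins for the
// pre- and post-fork proto message types.
type attestation struct{}
type attestationElectra struct{}

const electraForkEpoch = 100 // assumed value, for illustration only

// topicMessage returns the message type the attestation subnet topic decodes
// to at the given epoch, mirroring the epoch check in GossipTopicMappings.
func topicMessage(epoch uint64) interface{} {
	if epoch >= electraForkEpoch {
		return &attestationElectra{}
	}
	return &attestation{}
}

func main() {
	fmt.Printf("%T\n", topicMessage(99))  // *main.attestation
	fmt.Printf("%T\n", topicMessage(100)) // *main.attestationElectra
}
```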

View File

@@ -118,7 +118,7 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.SingleAttestation)
_, ok = pMessage.(*ethpb.AttestationElectra)
assert.Equal(t, true, ok)
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)

View File

@@ -73,9 +73,6 @@ const (
RPCBlocksByRootTopicV2 = protocolPrefix + BeaconBlocksByRootsMessageName + SchemaVersionV2
// RPCMetaDataTopicV2 defines the v2 topic for the metadata rpc method.
RPCMetaDataTopicV2 = protocolPrefix + MetadataMessageName + SchemaVersionV2
RPCBlobSidecarsByRangeTopicV2 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV2
RPCBlobSidecarsByRootTopicV2 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV2
)
// RPC errors for topic parsing.

View File

@@ -112,7 +112,7 @@ func InitializeDataMaps() {
return &ethpb.Attestation{}, nil
},
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (ethpb.Att, error) {
return &ethpb.SingleAttestation{}, nil
return &ethpb.AttestationElectra{}, nil
},
}

View File

@@ -21,7 +21,7 @@ type Service struct {
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationDataCache
AttestationCache *cache.AttestationCache
StateGen stategen.StateManager
P2P p2p.Broadcaster
ReplayerBuilder stategen.ReplayerBuilder

View File

@@ -177,7 +177,6 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
}
const namespace = "blob"
@@ -205,7 +204,6 @@ func (s *Service) validatorEndpoints(
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
AttestationCache: s.cfg.AttestationCache,
AttestationsPool: s.cfg.AttestationsPool,
PeerManager: s.cfg.PeerManager,
Broadcaster: s.cfg.Broadcaster,
@@ -509,7 +507,6 @@ func (s *Service) beaconEndpoints(
server := &beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationCache: s.cfg.AttestationCache,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
@@ -532,7 +529,6 @@ func (s *Service) beaconEndpoints(
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
AttestationStateFetcher: s.cfg.AttestationReceiver,
}
const namespace = "beacon"

View File

@@ -17,7 +17,6 @@ go_library(
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",

View File

@@ -49,18 +49,13 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
return
}
var attestations []eth.Att
if features.Get().EnableExperimentalAttestationPool {
attestations = s.AttestationCache.GetAll()
} else {
attestations = s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
filteredAtts := make([]*structs.Attestation, 0, len(attestations))
for _, a := range attestations {
@@ -107,19 +102,13 @@ func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
if rawSlot == "" {
v = slots.ToForkVersion(s.TimeFetcher.CurrentSlot())
}
var attestations []eth.Att
if features.Get().EnableExperimentalAttestationPool {
attestations = s.AttestationCache.GetAll()
} else {
attestations = s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)
filteredAtts := make([]interface{}, 0, len(attestations))
for _, att := range attestations {
@@ -285,11 +274,8 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
}
}
func (s *Server) handleAttestationsElectra(
ctx context.Context,
data json.RawMessage,
) (attFailures []*server.IndexedVerificationFailure, failedBroadcasts []string, err error) {
var sourceAttestations []*structs.SingleAttestation
func (s *Server) handleAttestationsElectra(ctx context.Context, data json.RawMessage) (attFailures []*server.IndexedVerificationFailure, failedBroadcasts []string, err error) {
var sourceAttestations []*structs.AttestationElectra
if err = json.Unmarshal(data, &sourceAttestations); err != nil {
return nil, nil, errors.Wrap(err, "failed to unmarshal attestation")
@@ -299,7 +285,7 @@ func (s *Server) handleAttestationsElectra(
return nil, nil, errors.New("no data submitted")
}
var validAttestations []*eth.SingleAttestation
var validAttestations []*eth.AttestationElectra
for i, sourceAtt := range sourceAttestations {
att, err := sourceAtt.ToConsensus()
if err != nil {
@@ -320,23 +306,14 @@ func (s *Server) handleAttestationsElectra(
}
for i, att := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, att.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, att.Data.Slot, att.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note we can't send for aggregated att because we don't have selection proof.
if !att.IsAggregated() {
if !corehelpers.IsAggregated(att) {
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att.ToAttestationElectra(committee),
Attestation: att,
},
})
}
@@ -347,20 +324,24 @@ func (s *Server) handleAttestationsElectra(
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
continue
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
committeeIndex, err := att.GetCommitteeIndex()
if err != nil {
return nil, nil, errors.Wrap(err, "failed to retrieve attestation committee index")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committeeIndex, att.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
log.WithError(err).Errorf("could not broadcast attestation at index %d", i)
failedBroadcasts = append(failedBroadcasts, strconv.Itoa(i))
continue
}
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("could not save attestation")
if corehelpers.IsAggregated(att) {
if err = s.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("could not save attestation")
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save unaggregated attestation")
}
}
}
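`ComputeSubnetFromCommitteeAndSlot` derives the committee count from the active validator count, then applies the spec's `compute_subnet_for_attestation`, which stripes committees across the 64 attestation subnets within an epoch. A sketch of the spec formula itself:

```go
package main

import "fmt"

const (
	attestationSubnetCount = 64
	slotsPerEpoch          = 32
)

// computeSubnet sketches compute_subnet_for_attestation from the consensus
// spec: committees are numbered from the start of the epoch and wrapped
// around the fixed set of attestation subnets.
func computeSubnet(committeesPerSlot, committeeIndex, slot uint64) uint64 {
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}

func main() {
	// Slot 33 is the second slot of epoch 1: (4*1 + 2) % 64 = 6.
	fmt.Println(computeSubnet(4, 2, 33)) // 6
}
```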
@@ -403,7 +384,7 @@ func (s *Server) handleAttestations(ctx context.Context, data json.RawMessage) (
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
// Note we can't send for aggregated att because we don't have selection proof.
if !att.IsAggregated() {
if !corehelpers.IsAggregated(att) {
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
@@ -426,11 +407,7 @@ func (s *Server) handleAttestations(ctx context.Context, data json.RawMessage) (
continue
}
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("could not save attestation")
}
} else if att.IsAggregated() {
if corehelpers.IsAggregated(att) {
if err = s.AttestationsPool.SaveAggregatedAttestation(att); err != nil {
log.WithError(err).Error("could not save aggregated attestation")
}

View File

@@ -498,17 +498,13 @@ func TestSubmitAttestations(t *testing.T) {
c.SlotsPerEpoch = 1
params.OverrideBeaconConfig(c)
_, keys, err := util.DeterministicDepositsAndKeys(2)
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validators := []*ethpbv1alpha1.Validator{
{
PublicKey: keys[0].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: keys[1].PublicKey().Marshal(),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = validators
@@ -525,10 +521,9 @@ func TestSubmitAttestations(t *testing.T) {
chainService := &blockchainmock.ChainService{State: bs}
s := &Server{
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
AttestationStateFetcher: chainService,
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
OperationNotifier: &blockchainmock.MockOperationNotifier{},
}
t.Run("V1", func(t *testing.T) {
t.Run("single", func(t *testing.T) {
@@ -737,7 +732,7 @@ func TestSubmitAttestations(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 1, broadcaster.NumAttestations())
assert.Equal(t, primitives.ValidatorIndex(1), broadcaster.BroadcastAttestations[0].GetAttestingIndex())
assert.Equal(t, "0x03", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetAggregationBits()))
assert.Equal(t, "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetSignature()))
assert.Equal(t, primitives.Slot(0), broadcaster.BroadcastAttestations[0].GetData().Slot)
assert.Equal(t, primitives.CommitteeIndex(0), broadcaster.BroadcastAttestations[0].GetData().CommitteeIndex)
@@ -2349,8 +2344,8 @@ var (
]`
singleAttElectra = `[
{
"committee_index": "0",
"attester_index": "1",
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
@@ -2369,8 +2364,8 @@ var (
]`
multipleAttsElectra = `[
{
"committee_index": "0",
"attester_index": "0",
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
@@ -2387,8 +2382,8 @@ var (
}
},
{
"committee_index": "0",
"attester_index": "1",
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"signature": "0x8146f4397bfd8fd057ebbcd6a67327bdc7ed5fb650533edcb6377b650dea0b6da64c14ecd60846d5c0a0cd43893d6972092500f82c9d8a955e2b58c5ed3cbe885d84008ace6bd86ba9e23652f58e2ec207cec494c916063257abf285b9b15b15",
"data": {
"slot": "0",
@@ -2408,8 +2403,8 @@ var (
// signature is invalid
invalidAttElectra = `[
{
"committee_index": "0",
"attester_index": "0",
"aggregation_bits": "0x03",
"committee_bits": "0x0100000000000000",
"signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"data": {
"slot": "0",

View File

@@ -5,7 +5,6 @@ package beacon
import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
@@ -32,7 +31,6 @@ type Server struct {
BlockNotifier blockfeed.Notifier
OperationNotifier operation.Notifier
Broadcaster p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
SlashingsPool slashings.PoolManager
VoluntaryExitsPool voluntaryexits.PoolManager
@@ -50,5 +48,4 @@ type Server struct {
BLSChangesPool blstoexec.PoolManager
ForkchoiceFetcher blockchain.ForkchoiceFetcher
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
}

View File

@@ -14,9 +14,7 @@ go_library(
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",

View File

@@ -12,9 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/network/httputil"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
@@ -25,7 +23,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.Blobs")
defer span.End()
indices, err := parseIndices(r.URL, s.TimeFetcher.CurrentSlot())
indices, err := parseIndices(r.URL)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
@@ -89,9 +87,9 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
}
// parseIndices filters out invalid and duplicate blob indices
func parseIndices(url *url.URL, s primitives.Slot) ([]uint64, error) {
func parseIndices(url *url.URL) ([]uint64, error) {
rawIndices := url.Query()["indices"]
indices := make([]uint64, 0, params.BeaconConfig().MaxBlobsPerBlock(s))
indices := make([]uint64, 0, field_params.MaxBlobsPerBlock)
invalidIndices := make([]string, 0)
loop:
for _, raw := range rawIndices {
@@ -100,7 +98,7 @@ loop:
invalidIndices = append(invalidIndices, raw)
continue
}
if ix >= uint64(params.BeaconConfig().MaxBlobsPerBlock(s)) {
if ix >= field_params.MaxBlobsPerBlock {
invalidIndices = append(invalidIndices, raw)
continue
}
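The hunk above swaps the slot-dependent cap (`params.BeaconConfig().MaxBlobsPerBlock(s)`) for the compile-time `field_params.MaxBlobsPerBlock`. A self-contained sketch of the same validate-and-dedupe loop, with a plain constant standing in for the field param (the value 6 is illustrative):

package sketch

import (
	"fmt"
	"strconv"
)

const maxBlobsPerBlock = 6 // stands in for field_params.MaxBlobsPerBlock

// parseIndicesSketch keeps valid, de-duplicated blob indices and reports the rest.
func parseIndicesSketch(raw []string) ([]uint64, error) {
	indices := make([]uint64, 0, maxBlobsPerBlock)
	var invalid []string
loop:
	for _, r := range raw {
		ix, err := strconv.ParseUint(r, 10, 64)
		if err != nil || ix >= maxBlobsPerBlock {
			invalid = append(invalid, r)
			continue
		}
		for _, i := range indices {
			if i == ix {
				continue loop // drop duplicates silently
			}
		}
		indices = append(indices, ix)
	}
	if len(invalid) > 0 {
		return nil, fmt.Errorf("requested blob indices %v are invalid", invalid)
	}
	return indices, nil
}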

View File

@@ -52,7 +52,6 @@ func TestBlobs(t *testing.T) {
s := &Server{
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
TimeFetcher: mockChainService,
}
t.Run("genesis", func(t *testing.T) {
@@ -401,7 +400,7 @@ func Test_parseIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseIndices(&url.URL{RawQuery: tt.query}, 0)
got, err := parseIndices(&url.URL{RawQuery: tt.query})
if err != nil && tt.wantErr != "" {
require.StringContains(t, tt.wantErr, err.Error())
return

View File

@@ -9,5 +9,4 @@ type Server struct {
Blocker lookup.Blocker
OptimisticModeFetcher blockchain.OptimisticModeFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
}

View File

@@ -190,7 +190,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 160, len(data))
assert.Equal(t, 156, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -335,7 +335,7 @@ func TestGetSpec(t *testing.T) {
case "MAX_VOLUNTARY_EXITS":
assert.Equal(t, "52", v)
case "MAX_BLOBS_PER_BLOCK":
assert.Equal(t, "6", v)
assert.Equal(t, "4", v)
case "TIMELY_HEAD_FLAG_INDEX":
assert.Equal(t, "0x35", v)
case "TIMELY_SOURCE_FLAG_INDEX":
@@ -529,12 +529,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "93", v)
case "MAX_PENDING_DEPOSITS_PER_EPOCH":
assert.Equal(t, "94", v)
case "TARGET_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "6", v)
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "9", v)
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
assert.Equal(t, "1152", v)
default:
t.Errorf("Incorrect key: %s", k)
}

View File

@@ -53,7 +53,6 @@ go_test(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",

View File

@@ -19,7 +19,6 @@ import (
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
@@ -461,7 +460,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
defer testSync.cleanup()
st := tc.getState()
v := &eth.Validator{ExitEpoch: math.MaxUint64, EffectiveBalance: params.BeaconConfig().MinActivationBalance}
v := &eth.Validator{ExitEpoch: math.MaxUint64}
require.NoError(t, st.SetValidators([]*eth.Validator{v}))
currentSlot := primitives.Slot(0)
// to avoid slot processing

View File

@@ -18,7 +18,6 @@ go_library(
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -46,7 +45,6 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
@@ -23,11 +22,6 @@ import (
// GetLightClientBootstrap - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/bootstrap.yaml
func (s *Server) GetLightClientBootstrap(w http.ResponseWriter, req *http.Request) {
if !features.Get().EnableLightClient {
httputil.HandleError(w, "Light client feature flag is not enabled", http.StatusNotFound)
return
}
// Prepare
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientBootstrap")
defer span.End()
@@ -82,21 +76,26 @@ func (s *Server) GetLightClientBootstrap(w http.ResponseWriter, req *http.Reques
// GetLightClientUpdatesByRange - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/updates.yaml
func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.Request) {
if !features.Get().EnableLightClient {
httputil.HandleError(w, "Light client feature flag is not enabled", http.StatusNotFound)
return
}
// Prepare
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientUpdatesByRange")
defer span.End()
// Determine slots per period
config := params.BeaconConfig()
slotsPerPeriod := uint64(config.EpochsPerSyncCommitteePeriod) * uint64(config.SlotsPerEpoch)
// Adjust count based on configuration
_, count, gotCount := shared.UintFromQuery(w, req, "count", true)
if !gotCount {
return
} else if count == 0 {
httputil.HandleError(w, fmt.Sprintf("Got invalid 'count' query variable '%d': count must be greater than 0", count), http.StatusBadRequest)
httputil.HandleError(w, fmt.Sprintf("got invalid 'count' query variable '%d': count must be greater than 0", count), http.StatusInternalServerError)
return
}
// Determine the start and end periods
_, startPeriod, gotStartPeriod := shared.UintFromQuery(w, req, "start_period", true)
if !gotStartPeriod {
return
}
@@ -104,13 +103,33 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
count = config.MaxRequestLightClientUpdates
}
_, startPeriod, gotStartPeriod := shared.UintFromQuery(w, req, "start_period", true)
if !gotStartPeriod {
// max possible slot is the current head slot
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
httputil.HandleError(w, "could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
maxSlot := uint64(headState.Slot())
// min possible slot is the first slot of the Altair fork epoch
minSlot := uint64(config.AltairForkEpoch) * uint64(config.SlotsPerEpoch)
// Adjust startPeriod: the end of the start period must be later than the Altair fork epoch, otherwise the sync committee votes cannot be retrieved
startPeriodEndSlot := (startPeriod+1)*slotsPerPeriod - 1
if startPeriodEndSlot < minSlot {
startPeriod = minSlot / slotsPerPeriod
}
// Compute the initial endPeriod; it may be adjusted below
endPeriod := startPeriod + count - 1
// Adjust endPeriod: the end of the end period must not be later than the current head slot
endPeriodEndSlot := (endPeriod+1)*slotsPerPeriod - 1
if endPeriodEndSlot > maxSlot {
endPeriod = maxSlot / slotsPerPeriod
}
// Fetch the stored updates for the requested periods
updatesMap, err := s.BeaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
if err != nil {
@@ -143,11 +162,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
// GetLightClientFinalityUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/finality_update.yaml
func (s *Server) GetLightClientFinalityUpdate(w http.ResponseWriter, req *http.Request) {
if !features.Get().EnableLightClient {
httputil.HandleError(w, "Light client feature flag is not enabled", http.StatusNotFound)
return
}
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientFinalityUpdate")
defer span.End()
@@ -206,11 +220,6 @@ func (s *Server) GetLightClientFinalityUpdate(w http.ResponseWriter, req *http.R
// GetLightClientOptimisticUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/optimistic_update.yaml
func (s *Server) GetLightClientOptimisticUpdate(w http.ResponseWriter, req *http.Request) {
if !features.Get().EnableLightClient {
httputil.HandleError(w, "Light client feature flag is not enabled", http.StatusNotFound)
return
}
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientOptimisticUpdate")
defer span.End()
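The range handler above reduces to a small piece of period arithmetic: a sync committee period spans `EpochsPerSyncCommitteePeriod * SlotsPerEpoch` slots, the start period is pushed forward so it ends after the Altair fork, and the end period is pulled back so it ends at or before the head slot. A standalone sketch of that clamping, with all inputs passed explicitly:

package sketch

// clampUpdateRange mirrors the adjustment in GetLightClientUpdatesByRange:
// minSlot is the first Altair slot, maxSlot is the current head slot.
func clampUpdateRange(startPeriod, count, slotsPerPeriod, minSlot, maxSlot uint64) (uint64, uint64) {
	// The end of the start period must land after the Altair fork.
	if (startPeriod+1)*slotsPerPeriod-1 < minSlot {
		startPeriod = minSlot / slotsPerPeriod
	}
	endPeriod := startPeriod + count - 1
	// The end of the end period must not pass the head slot.
	if (endPeriod+1)*slotsPerPeriod-1 > maxSlot {
		endPeriod = maxSlot / slotsPerPeriod
	}
	return startPeriod, endPeriod
}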

View File

@@ -19,7 +19,6 @@ import (
dbtesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -34,11 +33,6 @@ import (
)
func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
@@ -258,11 +252,6 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
// GetLightClientByRange tests
func TestLightClientHandler_GetLightClientUpdatesByRangeAltair(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
@@ -312,11 +301,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeAltair(t *testing.T) {
}
func TestLightClientHandler_GetLightClientUpdatesByRangeCapella(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -366,11 +350,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeCapella(t *testing.T) {
}
func TestLightClientHandler_GetLightClientUpdatesByRangeDeneb(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -420,11 +399,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeDeneb(t *testing.T) {
}
func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleAltair(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -484,11 +458,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleAltair(t *testin
}
func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleCapella(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -549,11 +518,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleCapella(t *testi
}
func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleDeneb(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -614,11 +578,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleDeneb(t *testing
}
func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleForksAltairCapella(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -687,11 +646,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleForksAltairCapel
}
func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleForksCapellaDeneb(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -761,11 +715,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeMultipleForksCapellaDene
}
func TestLightClientHandler_GetLightClientUpdatesByRangeCountBiggerThanLimit(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -828,11 +777,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeCountBiggerThanLimit(t *
}
func TestLightClientHandler_GetLightClientUpdatesByRangeCountBiggerThanMax(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -894,22 +838,35 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeCountBiggerThanMax(t *te
}
func TestLightClientHandler_GetLightClientUpdatesByRangeStartPeriodBeforeAltair(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
config.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(config)
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
headSlot := slot.Add(1)
err = st.SetSlot(headSlot)
require.NoError(t, err)
db := dbtesting.SetupDB(t)
updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
update, err := createUpdate(t, version.Altair)
require.NoError(t, err)
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), update)
require.NoError(t, err)
mockChainService := &mock.ChainService{State: st}
s := &Server{
BeaconDB: db,
HeadFetcher: mockChainService,
BeaconDB: db,
}
startPeriod := 0
url := fmt.Sprintf("http://foo.com/?count=2&start_period=%d", startPeriod)
@@ -921,17 +878,18 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeStartPeriodBeforeAltair(
require.Equal(t, http.StatusOK, writer.Code)
var resp structs.LightClientUpdatesByRangeResponse
err := json.Unmarshal(writer.Body.Bytes(), &resp.Updates)
err = json.Unmarshal(writer.Body.Bytes(), &resp.Updates)
require.NoError(t, err)
require.Equal(t, 0, len(resp.Updates))
require.Equal(t, 1, len(resp.Updates))
require.Equal(t, "altair", resp.Updates[0].Version)
updateJson, err := structs.LightClientUpdateFromConsensus(update)
require.NoError(t, err)
require.DeepEqual(t, updateJson, resp.Updates[0].Data)
}
func TestLightClientHandler_GetLightClientUpdatesByRangeMissingUpdates(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
params.SetupTestConfigCleanup(t)
@@ -1038,11 +996,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRangeMissingUpdates(t *testin
}
func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
config := params.BeaconConfig()
@@ -1155,11 +1108,6 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
}
func TestLightClientHandler_GetLightClientOptimisticUpdateAltair(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
config := params.BeaconConfig()
@@ -1272,11 +1220,6 @@ func TestLightClientHandler_GetLightClientOptimisticUpdateAltair(t *testing.T) {
}
func TestLightClientHandler_GetLightClientOptimisticUpdateCapella(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
config := params.BeaconConfig()
@@ -1389,11 +1332,6 @@ func TestLightClientHandler_GetLightClientOptimisticUpdateCapella(t *testing.T)
}
func TestLightClientHandler_GetLightClientOptimisticUpdateDeneb(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableLightClient: true,
})
defer resetFn()
helpers.ClearCache()
ctx := context.Background()
config := params.BeaconConfig()

View File

@@ -30,7 +30,6 @@ go_library(
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",

View File

@@ -25,7 +25,6 @@ import (
rpchelpers "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
@@ -130,23 +129,13 @@ func (s *Server) GetAggregateAttestationV2(w http.ResponseWriter, r *http.Reques
}
func (s *Server) aggregatedAttestation(w http.ResponseWriter, slot primitives.Slot, attDataRoot []byte, index primitives.CommitteeIndex) ethpbalpha.Att {
var match []ethpbalpha.Att
var err error
if features.Get().EnableExperimentalAttestationPool {
match, err = matchingAtts(s.AttestationCache.GetAll(), slot, attDataRoot, index)
if err != nil {
httputil.HandleError(w, "Could not get matching attestations: "+err.Error(), http.StatusInternalServerError)
return nil
}
} else {
match, err = matchingAtts(s.AttestationsPool.AggregatedAttestations(), slot, attDataRoot, index)
if err != nil {
httputil.HandleError(w, "Could not get matching attestations: "+err.Error(), http.StatusInternalServerError)
return nil
}
match, err := matchingAtts(s.AttestationsPool.AggregatedAttestations(), slot, attDataRoot, index)
if err != nil {
httputil.HandleError(w, "Could not get matching attestations: "+err.Error(), http.StatusInternalServerError)
return nil
}
if len(match) > 0 {
// If there are multiple matching aggregated attestations,
// then we return the one with the most aggregation bits.
@@ -156,11 +145,6 @@ func (s *Server) aggregatedAttestation(w http.ResponseWriter, slot primitives.Sl
return match[0]
}
// No match was found and the new pool doesn't store aggregated and unaggregated attestations separately.
if features.Get().EnableExperimentalAttestationPool {
return nil
}
atts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
@@ -205,7 +189,11 @@ func matchingAtts(atts []ethpbalpha.Att, slot primitives.Slot, attDataRoot []byt
// compare the committee index separately.
if postElectra {
if att.Version() >= version.Electra {
if att.GetCommitteeIndex() != index {
ci, err := att.GetCommitteeIndex()
if err != nil {
return nil, err
}
if ci != index {
continue
}
} else {
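Post-Electra the committee index is no longer read from `AttestationData` but derived from the attestation's committee bits, which is why one side of the hunk above has `GetCommitteeIndex` return an error alongside the index. A hedged sketch of the version split, with a pared-down interface standing in for `ethpb.Att`:

package sketch

import "fmt"

// att is a simplified stand-in for ethpb.Att.
type att interface {
	Version() int
	CommitteeIndexFromBits() (uint64, error) // Electra: derived from committee bits
	DataCommitteeIndex() uint64              // pre-Electra: stored in AttestationData
}

const versionElectra = 6 // illustrative constant

func committeeIndexOf(a att) (uint64, error) {
	if a.Version() >= versionElectra {
		ci, err := a.CommitteeIndexFromBits()
		if err != nil {
			return 0, fmt.Errorf("could not get committee index: %w", err)
		}
		return ci, nil
	}
	return a.DataCommitteeIndex(), nil
}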

View File

@@ -1194,7 +1194,7 @@ func TestGetAttestationData(t *testing.T) {
HeadFetcher: chain,
GenesisTimeFetcher: chain,
FinalizedFetcher: chain,
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: chain,
},
}
@@ -1275,7 +1275,7 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: chain,
HeadFetcher: chain,
FinalizedFetcher: chain,
@@ -1434,7 +1434,7 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,
@@ -1528,7 +1528,7 @@ func TestGetAttestationData(t *testing.T) {
TimeFetcher: chain,
OptimisticModeFetcher: chain,
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: chain,
HeadFetcher: chain,
GenesisTimeFetcher: chain,

View File

@@ -22,7 +22,6 @@ type Server struct {
HeadFetcher blockchain.HeadFetcher
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster

View File

@@ -235,7 +235,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
return make([]*blocks.VerifiedROBlob, 0), nil
}
if len(indices) == 0 {
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root), b.Block().Slot())
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
if err != nil {
log.WithFields(log.Fields{
"blockRoot": hexutil.Encode(root),
@@ -244,9 +244,6 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
}
for k, v := range m {
if v {
if k >= len(commitments) {
return nil, &core.RpcError{Err: fmt.Errorf("blob index %d exceeds the number of blob kzg commitments: %d", k, len(commitments)), Reason: core.BadRequest}
}
indices = append(indices, uint64(k))
}
}
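One side of the hunk above additionally bounds-checks indices recovered from blob storage against the block's KZG commitments before using them. A sketch of that loop, assuming the storage layer returns a map from index to presence:

package sketch

import "fmt"

// indicesFromStorage collects the stored blob indices, rejecting any index
// that has no matching KZG commitment in the block.
func indicesFromStorage(stored map[int]bool, commitments [][]byte) ([]uint64, error) {
	indices := make([]uint64, 0, len(stored))
	for k, present := range stored {
		if !present {
			continue
		}
		if k >= len(commitments) {
			return nil, fmt.Errorf("blob index %d exceeds the number of blob kzg commitments: %d", k, len(commitments))
		}
		indices = append(indices, uint64(k))
	}
	return indices, nil
}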

View File

@@ -7,13 +7,11 @@ import (
"strings"
"github.com/prysmaticlabs/prysm/v5/api/pagination"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -307,14 +305,7 @@ func (bs *Server) ListIndexedAttestationsElectra(
// attestations are processed and when they are no longer valid.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#attestations
func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolResponse, error) {
var atts []*ethpb.Attestation
var err error
if features.Get().EnableExperimentalAttestationPool {
atts, err = attestationsFromCache[*ethpb.Attestation](req.PageSize, bs.AttestationCache)
} else {
atts, err = attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
}
atts, err := attestationsFromPool[*ethpb.Attestation](req.PageSize, bs.AttestationsPool)
if err != nil {
return nil, err
}
@@ -341,18 +332,10 @@ func (bs *Server) AttestationPool(_ context.Context, req *ethpb.AttestationPoolR
}
func (bs *Server) AttestationPoolElectra(_ context.Context, req *ethpb.AttestationPoolRequest) (*ethpb.AttestationPoolElectraResponse, error) {
var atts []*ethpb.AttestationElectra
var err error
if features.Get().EnableExperimentalAttestationPool {
atts, err = attestationsFromCache[*ethpb.AttestationElectra](req.PageSize, bs.AttestationCache)
} else {
atts, err = attestationsFromPool[*ethpb.AttestationElectra](req.PageSize, bs.AttestationsPool)
}
atts, err := attestationsFromPool[*ethpb.AttestationElectra](req.PageSize, bs.AttestationsPool)
if err != nil {
return nil, err
}
// If there are no attestations, we simply return a response specifying this.
// Otherwise, attempting to paginate 0 attestations below would result in an error.
if len(atts) == 0 {
@@ -482,25 +465,3 @@ func attestationsFromPool[T ethpb.Att](pageSize int32, pool attestations.Pool) (
}
return atts, nil
}
func attestationsFromCache[T ethpb.Att](pageSize int32, c *cache.AttestationCache) ([]T, error) {
if int(pageSize) > cmd.Get().MaxRPCPageSize {
return nil, status.Errorf(
codes.InvalidArgument,
"Requested page size %d can not be greater than max size %d",
pageSize,
cmd.Get().MaxRPCPageSize,
)
}
cacheAtts := c.GetAll()
atts := make([]T, 0, len(cacheAtts))
for _, att := range cacheAtts {
a, ok := att.(T)
if !ok {
var expected T
return nil, status.Errorf(codes.Internal, "Attestation is of the wrong type (expected %T, got %T)", expected, att)
}
atts = append(atts, a)
}
return atts, nil
}
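The removed `attestationsFromCache` shows the pattern both helpers rely on: a generic type-assertion loop that narrows `ethpb.Att` values to the concrete phase-specific type, failing loudly on the first mismatch. A standalone sketch of that loop over a plain interface:

package sketch

import "fmt"

type att interface{ Version() int }

// castAtts narrows a mixed slice to a single concrete attestation type,
// as the removed helper does for the cache's contents.
func castAtts[T att](in []att) ([]T, error) {
	out := make([]T, 0, len(in))
	for _, a := range in {
		typed, ok := a.(T)
		if !ok {
			var want T
			return nil, fmt.Errorf("attestation is of the wrong type (expected %T, got %T)", want, a)
		}
		out = append(out, typed)
	}
	return out, nil
}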

View File

@@ -39,7 +39,6 @@ type Server struct {
BlockNotifier blockfeed.Notifier
AttestationNotifier operation.Notifier
Broadcaster p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
SlashingsPool slashings.PoolManager
ChainStartChan chan time.Time

View File

@@ -3,10 +3,8 @@ package validator
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -29,21 +27,14 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
if err != nil {
return nil, err
}
var atts []*ethpb.Attestation
if features.Get().EnableExperimentalAttestationPool {
atts = cache.GetBySlotAndCommitteeIndex[*ethpb.Attestation](vs.AttestationCache, req.Slot, req.CommitteeIndex)
} else {
atts = vs.AttPool.AggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
atts := vs.AttPool.AggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
// Select the best aggregated attestation (i.e. the one with the most aggregation bits).
if len(atts) == 0 {
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
if len(atts) == 0 {
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(ctx, req.Slot, req.CommitteeIndex)
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
}
}
if len(atts) == 0 {
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
}
best := bestAggregate(atts, req.CommitteeIndex, indexInCommittee)
attAndProof := &ethpb.AggregateAttestationAndProof{
Aggregate: best,
@@ -68,21 +59,13 @@ func (vs *Server) SubmitAggregateSelectionProofElectra(
if err != nil {
return nil, err
}
var atts []*ethpb.AttestationElectra
if features.Get().EnableExperimentalAttestationPool {
atts = cache.GetBySlotAndCommitteeIndex[*ethpb.AttestationElectra](vs.AttestationCache, req.Slot, req.CommitteeIndex)
} else {
atts = vs.AttPool.AggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
atts := vs.AttPool.AggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
if len(atts) == 0 {
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
if len(atts) == 0 {
atts = vs.AttPool.UnaggregatedAttestationsBySlotIndexElectra(ctx, req.Slot, req.CommitteeIndex)
return nil, status.Errorf(codes.NotFound, "No attestations found in pool")
}
}
if len(atts) == 0 {
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
}
best := bestAggregate(atts, req.CommitteeIndex, indexInCommittee)
attAndProof := &ethpb.AggregateAttestationAndProofElectra{
Aggregate: best,
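Both selection-proof handlers share the same lookup order: prefer aggregated attestations for the slot/committee pair, fall back to unaggregated ones, and only then report NotFound. A compressed sketch of that flow against a reduced pool interface:

package sketch

import "errors"

type att struct{ aggregated bool }

type pool interface {
	AggregatedBySlotIndex(slot, committee uint64) []att
	UnaggregatedBySlotIndex(slot, committee uint64) []att
}

var errNoAtts = errors.New("could not find attestation for slot and committee in pool")

// attsForAggregation mirrors the lookup order in both selection-proof handlers.
func attsForAggregation(p pool, slot, committee uint64) ([]att, error) {
	atts := p.AggregatedBySlotIndex(slot, committee)
	if len(atts) == 0 {
		atts = p.UnaggregatedBySlotIndex(slot, committee)
	}
	if len(atts) == 0 {
		return nil, errNoAtts
	}
	return atts, nil
}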

View File

@@ -8,12 +8,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -46,64 +44,47 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
defer span.End()
resp, err := vs.proposeAtt(ctx, att, nil, att.GetData().CommitteeIndex)
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
go func() {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
} else {
go func() {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
}()
return resp, nil
}
// ProposeAttestationElectra is a function called by an attester to vote
// on a block via an attestation object as defined in the Ethereum specification.
func (vs *Server) ProposeAttestationElectra(ctx context.Context, att *ethpb.SingleAttestation) (*ethpb.AttestResponse, error) {
func (vs *Server) ProposeAttestationElectra(ctx context.Context, att *ethpb.AttestationElectra) (*ethpb.AttestResponse, error) {
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()
targetState, err := vs.AttestationStateFetcher.AttestationTargetState(ctx, att.Data.Target)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get target state")
}
committeeIndex := att.GetCommitteeIndex()
committee, err := helpers.BeaconCommitteeFromState(ctx, targetState, att.Data.Slot, committeeIndex)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get committee")
}
resp, err := vs.proposeAtt(ctx, att, committee, committeeIndex)
committeeIndex, err := att.GetCommitteeIndex()
if err != nil {
return nil, err
}
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
ctx = trace.NewContext(context.Background(), trace.FromContext(ctx))
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy.ToAttestationElectra(committee)); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
resp, err := vs.proposeAtt(ctx, att, committeeIndex)
if err != nil {
return nil, err
}
go func() {
ctx = trace.NewContext(context.Background(), trace.FromContext(ctx))
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
return resp, nil
}
@@ -155,29 +136,14 @@ func (vs *Server) SubscribeCommitteeSubnets(ctx context.Context, req *ethpb.Comm
return &emptypb.Empty{}, nil
}
func (vs *Server) proposeAtt(
ctx context.Context,
att ethpb.Att,
committee []primitives.ValidatorIndex, // required post-Electra
committeeIndex primitives.CommitteeIndex,
) (*ethpb.AttestResponse, error) {
func (vs *Server) proposeAtt(ctx context.Context, att ethpb.Att, committee primitives.CommitteeIndex) (*ethpb.AttestResponse, error) {
if _, err := bls.SignatureFromBytes(att.GetSignature()); err != nil {
return nil, status.Error(codes.InvalidArgument, "Incorrect attestation signature")
}
root, err := att.GetData().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get attestation root: %v", err)
}
var singleAtt *ethpb.SingleAttestation
if att.Version() >= version.Electra {
var ok bool
singleAtt, ok = att.(*ethpb.SingleAttestation)
if !ok {
return nil, status.Errorf(codes.Internal, "Attestation has wrong type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
}
att = singleAtt.ToAttestationElectra(committee)
return nil, status.Errorf(codes.Internal, "Could not tree hash attestation: %v", err)
}
// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
@@ -195,16 +161,10 @@ func (vs *Server) proposeAtt(
if err != nil {
return nil, err
}
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committeeIndex, att.GetData().Slot)
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committee, att.GetData().Slot)
// Broadcast the new attestation to the network.
var attToBroadcast ethpb.Att
if singleAtt != nil {
attToBroadcast = singleAtt
} else {
attToBroadcast = att
}
if err := vs.P2P.BroadcastAttestation(ctx, subnet, attToBroadcast); err != nil {
if err := vs.P2P.BroadcastAttestation(ctx, subnet, att); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast attestation: %v", err)
}
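The subnet passed to `BroadcastAttestation` comes from `ComputeSubnetFromCommitteeAndSlot`, which follows `compute_subnet_for_attestation` from the consensus spec; Prysm derives the committee count from the active validator count (`len(vals)` above). A sketch with the committee count passed in directly, using mainnet constants:

package sketch

const (
	attestationSubnetCount = 64 // ATTESTATION_SUBNET_COUNT
	slotsPerEpoch          = 32 // mainnet SLOTS_PER_EPOCH
)

// computeSubnet rotates attestation subnets with the committees scheduled
// since the start of the epoch.
func computeSubnet(committeesPerSlot, committeeIndex, slot uint64) uint64 {
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}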

View File

@@ -65,7 +65,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:], State: beaconState},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},

View File

@@ -31,13 +31,11 @@ import (
)
func TestProposeAttestation(t *testing.T) {
chainService := &mock.ChainService{}
attesterServer := &Server{
HeadFetcher: chainService,
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
AttestationStateFetcher: chainService,
HeadFetcher: &mock.ChainService{},
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
}
head := util.NewBeaconBlock()
head.Block.Slot = 999
@@ -81,19 +79,81 @@ func TestProposeAttestation(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
chainService.State = state
req := &ethpb.SingleAttestation{
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.NoError(t, err)
})
t.Run("Electra - non-zero committee index", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
CommitteeIndex: 1,
},
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "attestation data's committee index must be 0 but was 1", err)
})
t.Run("Electra - no committee bit set", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
CommitteeBits: primitives.NewAttestationCommitteeBits(),
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "exactly 1 committee index must be set but 0 were set", err)
})
t.Run("Electra - multiple committee bits set", func(t *testing.T) {
state, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
require.NoError(t, state.SetValidators(validators))
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
req := &ethpb.AttestationElectra{
Signature: sig.Marshal(),
Data: &ethpb.AttestationData{
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
CommitteeBits: cb,
}
_, err = attesterServer.ProposeAttestationElectra(context.Background(), req)
assert.ErrorContains(t, "exactly 1 committee index must be set but 2 were set", err)
})
}
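The three failure cases exercised above reduce to one rule from EIP-7549: an unaggregated Electra attestation must set exactly one committee bit, and its `AttestationData.CommitteeIndex` must be zero. A sketch of that validation using the go-bitfield API already imported elsewhere in this diff; the function name is illustrative:

package sketch

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// committeeIndexFromBits enforces the single-committee rule for Electra
// attestations and returns the index of the one set bit.
func committeeIndexFromBits(dataCommitteeIndex uint64, bits bitfield.Bitvector64) (uint64, error) {
	if dataCommitteeIndex != 0 {
		return 0, fmt.Errorf("attestation data's committee index must be 0 but was %d", dataCommitteeIndex)
	}
	set := bits.BitIndices()
	if len(set) != 1 {
		return 0, fmt.Errorf("exactly 1 committee index must be set but %d were set", len(set))
	}
	return uint64(set[0]), nil
}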
func TestProposeAttestation_IncorrectSignature(t *testing.T) {
@@ -144,7 +204,7 @@ func TestGetAttestationData_OK(t *testing.T) {
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}
@@ -199,7 +259,7 @@ func BenchmarkGetAttestationDataConcurrent(b *testing.B) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{TargetRoot: targetRoot, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
@@ -253,7 +313,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
},
}
@@ -270,7 +330,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{}},
@@ -380,7 +440,7 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{
TargetRoot: targetRoot, Root: blockRoot[:], State: beaconState,
},
@@ -454,7 +514,7 @@ func TestGetAttestationData_CommitteeIndexIsZeroPostElectra(t *testing.T) {
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint},
AttestationCache: cache.NewAttestationDataCache(),
AttestationCache: cache.NewAttestationCache(),
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
},
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -29,23 +30,22 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
ctx, span := trace.StartSpan(ctx, "ProposerServer.packAttestations")
defer span.End()
var atts []ethpb.Att
if features.Get().EnableExperimentalAttestationPool {
atts = vs.AttestationCache.GetAll()
atts = vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
} else {
atts = vs.AttPool.AggregatedAttestations()
atts = vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
}
uAtts = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
atts = append(atts, uAtts...)
atts := vs.AttPool.AggregatedAttestations()
atts, err := vs.validateAndDeleteAttsInPool(ctx, latestState, atts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
uAtts, err := vs.AttPool.UnaggregatedAttestations()
if err != nil {
return nil, errors.Wrap(err, "could not get unaggregated attestations")
}
uAtts, err = vs.validateAndDeleteAttsInPool(ctx, latestState, uAtts)
if err != nil {
return nil, errors.Wrap(err, "could not filter attestations")
}
atts = append(atts, uAtts...)
// Checking the state's version here will give the wrong result if the last slot of Deneb is missed.
// The head state will still be in Deneb while we are trying to build an Electra block.
postElectra := slots.ToEpoch(blkSlot) >= params.BeaconConfig().ElectraForkEpoch
@@ -65,8 +65,6 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
}
}
var err error
// Remove duplicates from both aggregated/unaggregated attestations. This
// prevents inefficient aggregates being created.
versionAtts, err = proposerAtts(versionAtts).dedup()
@@ -457,15 +455,15 @@ func (a proposerAtts) dedup() (proposerAtts, error) {
}
// This filters the input attestations to return a list of valid attestations to be packaged inside a beacon block.
func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.BeaconState, atts []ethpb.Att) []ethpb.Att {
func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.BeaconState, atts []ethpb.Att) ([]ethpb.Att, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()
validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
log.WithError(err).Error("Could not delete invalid attestations")
return nil, err
}
return validAtts
return validAtts, nil
}
// The input attestations are processed and seen by the node, this deletes them from pool
@@ -478,19 +476,13 @@ func (vs *Server) deleteAttsInPool(ctx context.Context, atts []ethpb.Att) error
if ctx.Err() != nil {
return ctx.Err()
}
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.DeleteCovered(att); err != nil {
return errors.Wrap(err, "could not delete attestation")
if helpers.IsAggregated(att) {
if err := vs.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if att.IsAggregated() {
if err := vs.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := vs.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
if err := vs.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}
}
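With the experimental cache path removed, deletion again branches only on whether an attestation is aggregated (the two sides of the hunk differ only in calling `helpers.IsAggregated(att)` versus `att.IsAggregated()`). The core of the loop, sketched against a reduced pool interface:

package sketch

type att interface{ IsAggregated() bool }

type pool interface {
	DeleteAggregated(att) error
	DeleteUnaggregated(att) error
}

// deleteSeenAtt removes an attestation from whichever side of the pool it
// lives on, mirroring deleteAttsInPool above.
func deleteSeenAtt(p pool, a att) error {
	if a.IsAggregated() {
		return p.DeleteAggregated(a)
	}
	return p.DeleteUnaggregated(a)
}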

View File

@@ -270,14 +270,13 @@ func (vs *Server) getPayloadHeaderFromBuilder(
return nil, errors.Wrap(err, "could not validate builder signature")
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
var kzgCommitments [][]byte
if bid.Version() >= version.Deneb {
kzgCommitments, err = bid.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "could not get blob kzg commitments")
}
if len(kzgCommitments) > maxBlobsPerBlock {
if len(kzgCommitments) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("builder returned too many kzg commitments: %d", len(kzgCommitments))
}
for _, c := range kzgCommitments {
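The bound on builder KZG commitments is the same check on both sides of the hunk; only the source of the limit changes. A sketch that pairs the count check with a 48-byte commitment length check; the loop above is truncated, so the per-commitment body shown here is an assumption, not the repo's code:

package sketch

import "fmt"

const kzgCommitmentLength = 48 // size of a compressed BLS12-381 G1 point

// validateBuilderCommitments: the count check mirrors the hunk above; the
// length check is an assumed, illustrative body for the truncated loop.
func validateBuilderCommitments(commitments [][]byte, maxBlobsPerBlock int) error {
	if len(commitments) > maxBlobsPerBlock {
		return fmt.Errorf("builder returned too many kzg commitments: %d", len(commitments))
	}
	for i, c := range commitments {
		if len(c) != kzgCommitmentLength {
			return fmt.Errorf("commitment %d has length %d, want %d", i, len(c), kzgCommitmentLength)
		}
	}
	return nil
}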

View File

@@ -2580,6 +2580,7 @@ func TestProposer_FilterAttestation(t *testing.T) {
tests := []struct {
name string
wantedErr string
inputAtts func() []ethpb.Att
expectedAtts func(inputAtts []ethpb.Att) []ethpb.Att
}{
@@ -2655,8 +2656,14 @@ func TestProposer_FilterAttestation(t *testing.T) {
HeadFetcher: &mock.ChainService{State: st, Root: genesisRoot[:]},
}
atts := tt.inputAtts()
received := proposerServer.validateAndDeleteAttsInPool(context.Background(), st, atts)
assert.DeepEqual(t, tt.expectedAtts(atts), received)
received, err := proposerServer.validateAndDeleteAttsInPool(context.Background(), st, atts)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
assert.Equal(t, nil, received)
} else {
assert.NoError(t, err)
assert.DeepEqual(t, tt.expectedAtts(atts), received)
}
})
}
}

Some files were not shown because too many files have changed in this diff.