Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 06:18:05 -05:00

Compare commits: validation...plus-one-b (9 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 2018b647ea | |
| | 0352e39f64 | |
| | 82847da8a7 | |
| | accfdd0f7c | |
| | 2b51e9e350 | |
| | e05fec0f5f | |
| | 1a904dbae3 | |
| | 37417e5905 | |
| | 7a5f4cf122 | |
CHANGELOG.md (23 lines removed)
@@ -4,29 +4,6 @@ All notable changes to this project will be documented in this file.
 
 The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
 
-## [v6.1.4](https://github.com/prysmaticlabs/prysm/compare/v6.1.3...v6.1.4) - 2025-10-24
-
-This release includes a bug fix affecting block proposals in rare cases, along with an important update for Windows users running post-Fusaka fork.
-
-### Added
-
-- SSZ-QL: Add endpoints for `BeaconState`/`BeaconBlock`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15888)
-- Add native state diff type and marshalling functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15250)
-- Update the earliest available slot after pruning operations in beacon chain database pruner. This ensures the P2P layer accurately knows which historical data is available after pruning, preventing nodes from advertising or attempting to serve data that has been pruned. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15694)
-
-### Fixed
-
-- Correctly advertise (in ENR and beacon API) attestation subnets when using `--subscribe-all-subnets`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15880)
-- `randomPeer`: Return if the context is cancelled when waiting for peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15876)
-- Improve error message when the byte count read from disk when reading a data column sidecars is lower than expected. (Mostly, because the file is truncated.). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15881)
-- Delete the genesis state file when --clear-db / --force-clear-db is specified. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15883)
-- Fix sync committee subscription to use subnet indices instead of committee indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15885)
-- Fixed metadata extraction on Windows by correctly splitting file paths. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15899)
-- `VerifyDataColumnsSidecarKZGProofs`: Check if sizes match. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15892)
-- Fix recoverStateSummary to persist state summaries in stateSummaryBucket instead of stateBucket (#15896). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15896)
-- `updateCustodyInfoInDB`: Use `NumberOfCustodyGroups` instead of `NumberOfColumns`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15908)
-- Sync committee uses correct state to calculate position. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15905)
-
 ## [v6.1.3](https://github.com/prysmaticlabs/prysm/compare/v6.1.2...v6.1.3) - 2025-10-20
 
 This release has several important beacon API and p2p fixes.
@@ -472,8 +472,8 @@ func (s *Service) removeStartupState() {
 func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
 	isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
 
-	cfg := params.BeaconConfig()
-	custodyRequirement := cfg.CustodyRequirement
+	beaconConfig := params.BeaconConfig()
+	custodyRequirement := beaconConfig.CustodyRequirement
 
 	// Check if the node was previously subscribed to all data subnets, and if so,
 	// store the new status accordingly.
@@ -493,7 +493,7 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
 	// Compute the custody group count.
 	custodyGroupCount := custodyRequirement
 	if isSubscribedToAllDataSubnets {
-		custodyGroupCount = cfg.NumberOfCustodyGroups
+		custodyGroupCount = beaconConfig.NumberOfCustodyGroups
 	}
 
 	// Safely compute the fulu fork slot.
@@ -536,11 +536,11 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
 }
 
 func fuluForkSlot() (primitives.Slot, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
-	fuluForkEpoch := cfg.FuluForkEpoch
-	if fuluForkEpoch == cfg.FarFutureEpoch {
-		return cfg.FarFutureSlot, nil
+	fuluForkEpoch := beaconConfig.FuluForkEpoch
+	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
+		return beaconConfig.FarFutureSlot, nil
 	}
 
 	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
@@ -472,36 +472,6 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
 	return s.InitSyncBlockRoots[rt]
 }
 
-func (s *ChainService) AvailableBlocks(ctx context.Context, blockRoots [][32]byte) map[[32]byte]bool {
-	if s.DB == nil {
-		return nil
-	}
-
-	count := len(blockRoots)
-	availableRoots := make(map[[32]byte]bool, count)
-	notInDBRoots := make([][32]byte, 0, count)
-	for _, root := range blockRoots {
-		if s.DB.HasBlock(ctx, root) {
-			availableRoots[root] = true
-			continue
-		}
-
-		notInDBRoots = append(notInDBRoots, root)
-	}
-
-	if s.InitSyncBlockRoots == nil {
-		return availableRoots
-	}
-
-	for _, root := range notInDBRoots {
-		if s.InitSyncBlockRoots[root] {
-			availableRoots[root] = true
-		}
-	}
-
-	return availableRoots
-}
-
 // RecentBlockSlot mocks the same method in the chain service.
 func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
 	return s.BlockSlot, nil
@@ -3,7 +3,6 @@ package blockchain
 import (
 	"context"
 	"fmt"
-	"slices"
 
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filters"
 	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -82,10 +81,12 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
 	if err != nil {
 		return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
 	}
-	if slices.Contains(roots, v.root) {
-		log.Info("Weak subjectivity check has passed!!")
-		v.verified = true
-		return nil
+	for _, root := range roots {
+		if v.root == root {
+			log.Info("Weak subjectivity check has passed!!")
+			v.verified = true
+			return nil
+		}
 	}
 	return errors.Wrap(errWSBlockNotFoundInEpoch, fmt.Sprintf("root=%#x, epoch=%d", v.root, v.epoch))
 }
@@ -401,7 +401,7 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
 		return 0, errors.New("empty active indices list")
 	}
 	hashFunc := hash.CustomSHA256Hasher()
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 	seedBuffer := make([]byte, len(seed)+8)
 	copy(seedBuffer, seed[:])
 
@@ -426,14 +426,14 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
 			offset := (i % 16) * 2
 			randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
 
-			if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
+			if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
 				return candidateIndex, nil
 			}
 		} else {
 			binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
 			randomByte := hashFunc(seedBuffer)[i%32]
 
-			if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
+			if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
 				return candidateIndex, nil
 			}
 		}
@@ -89,14 +89,14 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
 // ComputeColumnsForCustodyGroup computes the columns for a given custody group.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
 func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
-	cfg := params.BeaconConfig()
-	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
+	beaconConfig := params.BeaconConfig()
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
 
 	if custodyGroup >= numberOfCustodyGroups {
 		return nil, ErrCustodyGroupTooLarge
 	}
 
-	numberOfColumns := cfg.NumberOfColumns
+	numberOfColumns := beaconConfig.NumberOfColumns
 
 	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
 
@@ -112,9 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 // ComputeCustodyGroupForColumn computes the custody group for a given column.
 // It is the reciprocal function of ComputeColumnsForCustodyGroup.
 func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
-	cfg := params.BeaconConfig()
-	numberOfColumns := cfg.NumberOfColumns
-	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
+	beaconConfig := params.BeaconConfig()
+	numberOfColumns := beaconConfig.NumberOfColumns
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
 
 	if columnIndex >= numberOfColumns {
 		return 0, ErrIndexTooLarge
@@ -84,10 +84,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
 		totalNodeBalance += validator.EffectiveBalance()
 	}
 
-	cfg := params.BeaconConfig()
-	numberOfCustodyGroups := cfg.NumberOfCustodyGroups
-	validatorCustodyRequirement := cfg.ValidatorCustodyRequirement
-	balancePerAdditionalCustodyGroup := cfg.BalancePerAdditionalCustodyGroup
+	beaconConfig := params.BeaconConfig()
+	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
+	validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
+	balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
 
 	count := totalNodeBalance / balancePerAdditionalCustodyGroup
 	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
@@ -196,7 +196,7 @@ func TestAltairCompatible(t *testing.T) {
 }
 
 func TestCanUpgradeTo(t *testing.T) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
 	outerTestCases := []struct {
 		name string
@@ -205,32 +205,32 @@ func TestCanUpgradeTo(t *testing.T) {
 	}{
 		{
 			name:        "Altair",
-			forkEpoch:   &cfg.AltairForkEpoch,
+			forkEpoch:   &beaconConfig.AltairForkEpoch,
 			upgradeFunc: time.CanUpgradeToAltair,
 		},
 		{
 			name:        "Bellatrix",
-			forkEpoch:   &cfg.BellatrixForkEpoch,
+			forkEpoch:   &beaconConfig.BellatrixForkEpoch,
 			upgradeFunc: time.CanUpgradeToBellatrix,
 		},
 		{
 			name:        "Capella",
-			forkEpoch:   &cfg.CapellaForkEpoch,
+			forkEpoch:   &beaconConfig.CapellaForkEpoch,
 			upgradeFunc: time.CanUpgradeToCapella,
 		},
 		{
 			name:        "Deneb",
-			forkEpoch:   &cfg.DenebForkEpoch,
+			forkEpoch:   &beaconConfig.DenebForkEpoch,
 			upgradeFunc: time.CanUpgradeToDeneb,
 		},
 		{
 			name:        "Electra",
-			forkEpoch:   &cfg.ElectraForkEpoch,
+			forkEpoch:   &beaconConfig.ElectraForkEpoch,
 			upgradeFunc: time.CanUpgradeToElectra,
 		},
 		{
 			name:        "Fulu",
-			forkEpoch:   &cfg.FuluForkEpoch,
+			forkEpoch:   &beaconConfig.FuluForkEpoch,
 			upgradeFunc: time.CanUpgradeToFulu,
 		},
 	}
@@ -238,7 +238,7 @@ func TestCanUpgradeTo(t *testing.T) {
 	for _, otc := range outerTestCases {
 		params.SetupTestConfigCleanup(t)
 		*otc.forkEpoch = 5
-		params.OverrideBeaconConfig(cfg)
+		params.OverrideBeaconConfig(beaconConfig)
 
 		innerTestCases := []struct {
 			name string
@@ -28,7 +28,6 @@ type ReadOnlyDatabase interface {
 	BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error)
 	BlockRootsBySlot(ctx context.Context, slot primitives.Slot) (bool, [][32]byte, error)
 	HasBlock(ctx context.Context, blockRoot [32]byte) bool
-	AvailableBlocks(ctx context.Context, blockRoots [][32]byte) map[[32]byte]bool
 	GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
 	GenesisBlockRoot(ctx context.Context) ([32]byte, error)
 	IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
@@ -336,42 +336,6 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
 	return exists
 }
 
-// AvailableBlocks returns a set of roots indicating which blocks corresponding to `blockRoots` are available in the storage.
-func (s *Store) AvailableBlocks(ctx context.Context, blockRoots [][32]byte) map[[32]byte]bool {
-	_, span := trace.StartSpan(ctx, "BeaconDB.AvailableBlocks")
-	defer span.End()
-
-	count := len(blockRoots)
-	availableRoots := make(map[[32]byte]bool, count)
-
-	// First, check the cache for each block root.
-	notInCacheRoots := make([][32]byte, 0, count)
-	for _, root := range blockRoots {
-		if v, ok := s.blockCache.Get(string(root[:])); v != nil && ok {
-			availableRoots[root] = true
-			continue
-		}
-
-		notInCacheRoots = append(notInCacheRoots, root)
-	}
-
-	// Next, check the database for the remaining block roots.
-	if err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(blocksBucket)
-		for _, root := range notInCacheRoots {
-			if bkt.Get(root[:]) != nil {
-				availableRoots[root] = true
-			}
-		}
-
-		return nil
-	}); err != nil {
-		panic(err) // lint:nopanic -- View never returns an error.
-	}
-
-	return availableRoots
-}
-
 // BlocksBySlot retrieves a list of beacon blocks and its respective roots by slot.
 func (s *Store) BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")
@@ -656,44 +656,6 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
 	}
 }
 
-func TestAvailableBlocks(t *testing.T) {
-	ctx := t.Context()
-	db := setupDB(t)
-
-	b0, b1, b2 := util.NewBeaconBlock(), util.NewBeaconBlock(), util.NewBeaconBlock()
-	b0.Block.Slot, b1.Block.Slot, b2.Block.Slot = 10, 20, 30
-
-	sb0, err := blocks.NewSignedBeaconBlock(b0)
-	require.NoError(t, err)
-	r0, err := b0.Block.HashTreeRoot()
-	require.NoError(t, err)
-
-	// Save b0 but remove it from cache.
-	err = db.SaveBlock(ctx, sb0)
-	require.NoError(t, err)
-	db.blockCache.Del(string(r0[:]))
-
-	// b1 is not saved at all.
-	r1, err := b1.Block.HashTreeRoot()
-	require.NoError(t, err)
-
-	// Save b2 in cache and DB.
-	sb2, err := blocks.NewSignedBeaconBlock(b2)
-	require.NoError(t, err)
-	r2, err := b2.Block.HashTreeRoot()
-	require.NoError(t, err)
-	require.NoError(t, db.SaveBlock(ctx, sb2))
-	require.NoError(t, err)
-
-	expected := map[[32]byte]bool{r0: true, r2: true}
-	actual := db.AvailableBlocks(ctx, [][32]byte{r0, r1, r2})
-
-	require.Equal(t, len(expected), len(actual))
-	for i := range expected {
-		require.Equal(t, true, actual[i])
-	}
-}
-
 func TestStore_Blocks_FiltersCorrectly(t *testing.T) {
 	for _, tt := range blockTests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -31,6 +31,7 @@ go_test(
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
+        "//time/slots:go_default_library",
         "//time/slots/testing:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
         "@com_github_sirupsen_logrus//hooks/test:go_default_library",
@@ -36,7 +36,7 @@ type ServiceOption func(*Service)
 // The retention period is specified in epochs, and must be >= MIN_EPOCHS_FOR_BLOCK_REQUESTS.
 func WithRetentionPeriod(retentionEpochs primitives.Epoch) ServiceOption {
 	return func(s *Service) {
-		defaultRetentionEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + 1
+		defaultRetentionEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 		if retentionEpochs < defaultRetentionEpochs {
 			log.WithField("userEpochs", retentionEpochs).
 				WithField("minRequired", defaultRetentionEpochs).
@@ -75,7 +75,7 @@ func New(ctx context.Context, db iface.Database, genesisTime time.Time, initSync
 	p := &Service{
 		ctx: ctx,
 		db:  db,
-		ps:  pruneStartSlotFunc(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests) + 1), // Default retention epochs is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 from the current slot.
+		ps:  pruneStartSlotFunc(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)),
 		done: make(chan struct{}),
 		slotTicker: slots.NewSlotTicker(slots.UnsafeStartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot),
 		initSyncWaiter: initSyncWaiter,
@@ -239,15 +239,38 @@ func (p *Service) pruneBatches(pruneUpto primitives.Slot) (int, error) {
 }
 
 // pruneStartSlotFunc returns the function to determine the start slot to start pruning.
+// The pruning calculation is epoch-aligned,
+// ensuring that earliestAvailableSlot is always at an epoch boundary.
+// So that we prune epoch-wise.
+// e.g. if retentionEpochs is 3 i.e. we should keep at least 3 epochs from current slot,
+//
+//	current slot is 325 (=> current epoch is 10),
+//	then we should keep epoch 7 onwards (inclusive of epoch 7).
+//	So we can prune up to the last slot of 6th epoch i.e. 32 x 7 - 1 = 223
+//	Earliest available slot would be 224 in that case.
 func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
 	return func(current primitives.Slot) primitives.Slot {
 		if retentionEpochs > slots.MaxSafeEpoch() {
 			retentionEpochs = slots.MaxSafeEpoch()
 		}
-		offset := slots.UnsafeEpochStart(retentionEpochs)
-		if offset >= current {
+
+		// Calculate epoch-aligned minimum required slot
+		currentEpoch := slots.ToEpoch(current)
+		var minRequiredEpoch primitives.Epoch
+		if currentEpoch > retentionEpochs {
+			minRequiredEpoch = currentEpoch - retentionEpochs
+		} else {
+			minRequiredEpoch = 0
+		}
+
+		// Get the start slot of the minimum required epoch
+		minRequiredSlot, err := slots.EpochStart(minRequiredEpoch)
+		if err != nil || minRequiredSlot == 0 {
 			return 0
 		}
-		return current - offset
+
+		// Prune up to (but not including) the minimum required slot
+		// This ensures earliestAvailableSlot (pruneUpto + 1) is at the epoch boundary
		return minRequiredSlot - 1
 	}
 }
@@ -11,6 +11,7 @@ import (
 	eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 
 	"github.com/OffchainLabs/prysm/v6/testing/util"
+	"github.com/OffchainLabs/prysm/v6/time/slots"
 	slottest "github.com/OffchainLabs/prysm/v6/time/slots/testing"
 	"github.com/sirupsen/logrus"
@@ -247,12 +248,23 @@ func TestWithRetentionPeriod_EnforcesMinimum(t *testing.T) {
 	ctx := t.Context()
 	beaconDB := dbtest.SetupDB(t)
 
-	// Get the minimum required epochs (272 + 1 = 273 for minimal)
-	minRequiredEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests + 1)
+	// Get the minimum required epochs (272 for minimal)
+	minRequiredEpochs := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
 
 	// Use a slot that's guaranteed to be after the minimum retention period
 	currentSlot := primitives.Slot(minRequiredEpochs+100) * (params.BeaconConfig().SlotsPerEpoch)
 
+	// Calculate epoch-aligned expected prune slot
+	// For epoch-aligned pruning: pruneUpto = epochStart(currentEpoch - retention) - 1
+	currentEpoch := slots.ToEpoch(currentSlot)
+
+	// Helper function to calculate expected prune slot for a given retention
+	calcExpectedPruneSlot := func(retention primitives.Epoch) primitives.Slot {
+		minEpoch := currentEpoch - retention
+		minSlot, _ := slots.EpochStart(minEpoch)
+		return minSlot - 1
+	}
+
 	tests := []struct {
 		name string
 		userRetentionEpochs primitives.Epoch
@@ -262,19 +274,19 @@ func TestWithRetentionPeriod_EnforcesMinimum(t *testing.T) {
 		{
 			name:                "User value below minimum - should use minimum",
 			userRetentionEpochs: 2, // Way below minimum
-			expectedPruneSlot:   currentSlot - primitives.Slot(minRequiredEpochs)*params.BeaconConfig().SlotsPerEpoch,
+			expectedPruneSlot:   calcExpectedPruneSlot(minRequiredEpochs),
 			description:         "Should use minimum when user value is too low",
 		},
 		{
 			name:                "User value at minimum",
 			userRetentionEpochs: minRequiredEpochs,
-			expectedPruneSlot:   currentSlot - primitives.Slot(minRequiredEpochs)*params.BeaconConfig().SlotsPerEpoch,
+			expectedPruneSlot:   calcExpectedPruneSlot(minRequiredEpochs),
 			description:         "Should use user value when at minimum",
 		},
 		{
 			name:                "User value above minimum",
 			userRetentionEpochs: minRequiredEpochs + 10,
-			expectedPruneSlot:   currentSlot - primitives.Slot(minRequiredEpochs+10)*params.BeaconConfig().SlotsPerEpoch,
+			expectedPruneSlot:   calcExpectedPruneSlot(minRequiredEpochs + 10),
 			description:         "Should use user value when above minimum",
 		},
 	}
@@ -311,6 +323,133 @@
 	}
 }
 
+func TestWithRetentionPeriod_AcceptsSpecMinimum(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	config := params.MinimalSpecConfig()
+	params.OverrideBeaconConfig(config)
+
+	ctx := t.Context()
+	beaconDB := dbtest.SetupDB(t)
+
+	hook := logTest.NewGlobal()
+	logrus.SetLevel(logrus.WarnLevel)
+
+	// The spec minimum - this SHOULD be accepted without warning
+	specMinimum := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
+
+	// Use a slot that's guaranteed to be after the minimum retention period
+	currentSlot := primitives.Slot(specMinimum+100) * (params.BeaconConfig().SlotsPerEpoch)
+
+	mockCustody := &mockCustodyUpdater{}
+	p, err := New(
+		ctx,
+		beaconDB,
+		time.Now(),
+		nil,
+		nil,
+		mockCustody,
+		WithRetentionPeriod(specMinimum),
+	)
+	require.NoError(t, err)
+
+	// Test the pruning calculation
+	pruneUptoSlot := p.ps(currentSlot)
+
+	// The expected prune slot should use epoch-aligned calculation
+	// pruneUpto = epochStart(currentEpoch - retention) - 1
+	currentEpoch := slots.ToEpoch(currentSlot)
+	minRequiredEpoch := currentEpoch - specMinimum
+	minRequiredSlot, err := slots.EpochStart(minRequiredEpoch)
+	require.NoError(t, err)
+	expectedPruneSlot := minRequiredSlot - 1
+
+	assert.Equal(t, expectedPruneSlot, pruneUptoSlot,
+		"Pruner should accept and use MIN_EPOCHS_FOR_BLOCK_REQUESTS without adding 1")
+
+	for _, entry := range hook.AllEntries() {
+		if entry.Level == logrus.WarnLevel {
+			t.Errorf("Unexpected warning when using spec minimum: %s", entry.Message)
+		}
+	}
+}
+
+func TestPruneStartSlotFunc_EpochAlignment(t *testing.T) {
+	// This test verifies that the pruning calculation is epoch-aligned.
+	params.SetupTestConfigCleanup(t)
+	config := params.MinimalSpecConfig()
+	params.OverrideBeaconConfig(config)
+
+	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch // 8 for minimal config
+	retentionEpochs := primitives.Epoch(3)
+
+	tests := []struct {
+		name                    string
+		currentSlot             primitives.Slot
+		expectedEpochAlignment  bool
+		expectedMinRequiredSlot primitives.Slot
+		description             string
+	}{
+		{
+			name:                    "Pruning at epoch boundary",
+			currentSlot:             primitives.Slot(4 * slotsPerEpoch), // Slot 32 (epoch 4, slot 0 of epoch)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(1 * slotsPerEpoch), // Epoch 1 start = slot 8
+			description:             "When pruning at epoch boundary, earliestAvailableSlot should be at epoch boundary",
+		},
+		{
+			name:                    "Pruning at middle of epoch",
+			currentSlot:             primitives.Slot(4*slotsPerEpoch + 4), // Slot 36 (epoch 4, slot 4 of epoch)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(1 * slotsPerEpoch), // Epoch 1 start = slot 8
+			description:             "When pruning mid-epoch, earliestAvailableSlot must still be at epoch boundary",
+		},
+		{
+			name:                    "Pruning at end of epoch",
+			currentSlot:             primitives.Slot(5*slotsPerEpoch - 1), // Slot 39 (epoch 4, last slot)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(1 * slotsPerEpoch), // Epoch 1 start = slot 8
+			description:             "When pruning at epoch end, earliestAvailableSlot must be at epoch boundary",
+		},
+		{
+			name:                    "Pruning at various epoch positions",
+			currentSlot:             primitives.Slot(8*slotsPerEpoch + 5), // Slot 69 (epoch 8, slot 5 of epoch)
+			expectedEpochAlignment:  true,
+			expectedMinRequiredSlot: primitives.Slot(5 * slotsPerEpoch), // Epoch 5 start = slot 40
+			description:             "EarliestAvailableSlot should always align to epoch boundary",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Create the prune start slot function
+			ps := pruneStartSlotFunc(retentionEpochs)
+
+			// Calculate pruneUpto slot
+			pruneUpto := ps(tt.currentSlot)
+
+			// EarliestAvailableSlot is pruneUpto + 1
+			earliestAvailableSlot := pruneUpto + 1
+
+			// Verify epoch alignment: earliestAvailableSlot should be at an epoch boundary
+			if tt.expectedEpochAlignment {
+				// Check if earliestAvailableSlot is at the start of an epoch
+				epoch := slots.ToEpoch(earliestAvailableSlot)
+				epochStartSlot, err := slots.EpochStart(epoch)
+				require.NoError(t, err)
+
+				assert.Equal(t, epochStartSlot, earliestAvailableSlot,
+					"%s: earliestAvailableSlot (%d) should be at epoch boundary (slot %d of epoch %d)",
+					tt.description, earliestAvailableSlot, epochStartSlot, epoch)
+			}
+
+			// Verify it matches the expected minimum required slot for custody validation
+			assert.Equal(t, tt.expectedMinRequiredSlot, earliestAvailableSlot,
+				"%s: earliestAvailableSlot should match custody minimum required slot",
+				tt.description)
+		})
+	}
+}
+
 func TestPruner_UpdateEarliestSlotError(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
 	config := params.BeaconConfig()
@@ -12,7 +12,6 @@ import (
 	"os"
 	"os/signal"
 	"path/filepath"
-	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -1136,8 +1135,10 @@ func (b *BeaconNode) registerLightClientStore() {
 
 func hasNetworkFlag(cliCtx *cli.Context) bool {
 	for _, flag := range features.NetworkFlags {
-		if slices.ContainsFunc(flag.Names(), cliCtx.IsSet) {
-			return true
+		for _, name := range flag.Names() {
+			if cliCtx.IsSet(name) {
+				return true
+			}
 		}
 	}
 	return false
@@ -10,13 +10,11 @@ import (
 
 // pruneExpired prunes attestations pool on every slot interval.
 func (s *Service) pruneExpired() {
-	secondsPerSlot := params.BeaconConfig().SecondsPerSlot
-	offset := time.Duration(secondsPerSlot-1) * time.Second
-	slotTicker := slots.NewSlotTickerWithOffset(s.genesisTime, offset, secondsPerSlot)
-	defer slotTicker.Done()
+	ticker := time.NewTicker(s.cfg.pruneInterval)
+	defer ticker.Stop()
 	for {
 		select {
-		case <-slotTicker.C():
+		case <-ticker.C:
 			s.pruneExpiredAtts()
 			s.updateMetrics()
 		case <-s.ctx.Done():
@@ -17,9 +17,7 @@ import (
 )
 
 func TestPruneExpired_Ticker(t *testing.T) {
-	// Need timeout longer than the offset (secondsPerSlot - 1) + some buffer
-	timeout := time.Duration(params.BeaconConfig().SecondsPerSlot+5) * time.Second
-	ctx, cancel := context.WithTimeout(t.Context(), timeout)
+	ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
 	defer cancel()
 
 	s, err := NewService(ctx, &Config{
@@ -7,7 +7,6 @@ import (
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
-	"github.com/sirupsen/logrus"
 )
 
 // This is the default queue size used if we have specified an invalid one.
@@ -64,17 +63,12 @@ func (cfg *Config) connManagerLowHigh() (int, int) {
 	return low, high
 }
 
-// validateConfig validates whether the provided config has valid values and sets
-// the invalid ones to default.
-func validateConfig(cfg *Config) {
-	if cfg.QueueSize > 0 {
-		return
-	}
-
-	log.WithFields(logrus.Fields{
-		"queueSize": cfg.QueueSize,
-		"default":   defaultPubsubQueueSize,
-	}).Warning("Invalid pubsub queue size, setting the queue size to the default value")
-
-	cfg.QueueSize = defaultPubsubQueueSize
-}
+// validateConfig validates whether the values provided are accurate and will set
+// the appropriate values for those that are invalid.
+func validateConfig(cfg *Config) *Config {
+	if cfg.QueueSize == 0 {
+		log.Warnf("Invalid pubsub queue size of %d initialized, setting the quese size as %d instead", cfg.QueueSize, defaultPubsubQueueSize)
+		cfg.QueueSize = defaultPubsubQueueSize
+	}
+	return cfg
+}
@@ -259,11 +259,11 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
 }
 
 func fuluForkSlot() (primitives.Slot, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
-	fuluForkEpoch := cfg.FuluForkEpoch
-	if fuluForkEpoch == cfg.FarFutureEpoch {
-		return cfg.FarFutureSlot, nil
+	fuluForkEpoch := beaconConfig.FuluForkEpoch
+	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
+		return beaconConfig.FarFutureSlot, nil
 	}
 
 	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
@@ -3,7 +3,6 @@ package p2p
 import (
 	"strings"
 
-	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/peerstore"
 	"github.com/prometheus/client_golang/prometheus"
@@ -27,25 +26,12 @@ var (
 		Help: "The number of peers in a given state.",
 	},
 		[]string{"state"})
-	p2pMaxPeers = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "p2p_max_peers",
-		Help: "The target maximum number of peers.",
-	})
-	p2pPeerCountDirectionType = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "p2p_peer_count_direction_type",
-		Help: "The number of peers in a given direction and type.",
-	},
-		[]string{"direction", "type"})
 	connectedPeersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
 		Name: "connected_libp2p_peers",
 		Help: "Tracks the total number of connected libp2p peers by agent string",
 	},
 		[]string{"agent"},
 	)
-	minimumPeersPerSubnet = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "p2p_minimum_peers_per_subnet",
-		Help: "The minimum number of peers to connect to per subnet",
-	})
 	avgScoreConnectedClients = promauto.NewGaugeVec(prometheus.GaugeOpts{
 		Name: "connected_libp2p_peers_average_scores",
 		Help: "Tracks the overall p2p scores of connected libp2p peers by agent string",
@@ -188,26 +174,18 @@ var (
 )
 
 func (s *Service) updateMetrics() {
-	store := s.Host().Peerstore()
 	connectedPeers := s.peers.Connected()
 
 	p2pPeerCount.WithLabelValues("Connected").Set(float64(len(connectedPeers)))
 	p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected())))
 	p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting())))
 	p2pPeerCount.WithLabelValues("Disconnecting").Set(float64(len(s.peers.Disconnecting())))
 	p2pPeerCount.WithLabelValues("Bad").Set(float64(len(s.peers.Bad())))
 
-	upperTCP := strings.ToUpper(string(peers.TCP))
-	upperQUIC := strings.ToUpper(string(peers.QUIC))
-
-	p2pPeerCountDirectionType.WithLabelValues("inbound", upperTCP).Set(float64(len(s.peers.InboundConnectedWithProtocol(peers.TCP))))
-	p2pPeerCountDirectionType.WithLabelValues("inbound", upperQUIC).Set(float64(len(s.peers.InboundConnectedWithProtocol(peers.QUIC))))
-	p2pPeerCountDirectionType.WithLabelValues("outbound", upperTCP).Set(float64(len(s.peers.OutboundConnectedWithProtocol(peers.TCP))))
-	p2pPeerCountDirectionType.WithLabelValues("outbound", upperQUIC).Set(float64(len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))))
-
-	connectedPeersCountByClient := make(map[string]float64)
+	store := s.Host().Peerstore()
+	numConnectedPeersByClient := make(map[string]float64)
 	peerScoresByClient := make(map[string][]float64)
-	for _, p := range connectedPeers {
+	for i := 0; i < len(connectedPeers); i++ {
+		p := connectedPeers[i]
 		pid, err := peer.Decode(p.String())
 		if err != nil {
 			log.WithError(err).Debug("Could not decode peer string")
@@ -215,18 +193,16 @@ func (s *Service) updateMetrics() {
 		}
 
 		foundName := agentFromPid(pid, store)
-		connectedPeersCountByClient[foundName] += 1
+		numConnectedPeersByClient[foundName] += 1
 
 		// Get peer scoring data.
 		overallScore := s.peers.Scorers().Score(pid)
 		peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
 	}
 
 	connectedPeersCount.Reset() // Clear out previous results.
-	for agent, total := range connectedPeersCountByClient {
+	for agent, total := range numConnectedPeersByClient {
 		connectedPeersCount.WithLabelValues(agent).Set(total)
 	}
 
 	avgScoreConnectedClients.Reset() // Clear out previous results.
 	for agent, scoringData := range peerScoresByClient {
 		avgScore := average(scoringData)
@@ -25,7 +25,6 @@ package peers
 import (
 	"context"
 	"net"
-	"slices"
 	"sort"
 	"strings"
 	"time"
@@ -82,31 +81,29 @@ const (
 type InternetProtocol string
 
 const (
-	TCP  = InternetProtocol("tcp")
-	QUIC = InternetProtocol("quic")
+	TCP  = "tcp"
+	QUIC = "quic"
 )
 
-type (
-	// Status is the structure holding the peer status information.
-	Status struct {
-		ctx                   context.Context
-		scorers               *scorers.Service
-		store                 *peerdata.Store
-		ipTracker             map[string]uint64
-		rand                  *rand.Rand
-		ipColocationWhitelist []*net.IPNet
-	}
+// Status is the structure holding the peer status information.
+type Status struct {
+	ctx                   context.Context
+	scorers               *scorers.Service
+	store                 *peerdata.Store
+	ipTracker             map[string]uint64
+	rand                  *rand.Rand
+	ipColocationWhitelist []*net.IPNet
+}
 
-	// StatusConfig represents peer status service params.
-	StatusConfig struct {
-		// PeerLimit specifies maximum amount of concurrent peers that are expected to be connect to the node.
-		PeerLimit int
-		// ScorerParams holds peer scorer configuration params.
-		ScorerParams *scorers.Config
-		// IPColocationWhitelist contains CIDR ranges that are exempt from IP colocation limits.
-		IPColocationWhitelist []*net.IPNet
-	}
-)
+// StatusConfig represents peer status service params.
+type StatusConfig struct {
+	// PeerLimit specifies maximum amount of concurrent peers that are expected to be connect to the node.
+	PeerLimit int
+	// ScorerParams holds peer scorer configuration params.
+	ScorerParams *scorers.Config
+	// IPColocationWhitelist contains CIDR ranges that are exempt from IP colocation limits.
+	IPColocationWhitelist []*net.IPNet
+}
 
 // NewStatus creates a new status entity.
 func NewStatus(ctx context.Context, config *StatusConfig) *Status {
@@ -307,8 +304,11 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID {
 		connectedStatus := peerData.ConnState == Connecting || peerData.ConnState == Connected
 		if connectedStatus && peerData.MetaData != nil && !peerData.MetaData.IsNil() && peerData.MetaData.AttnetsBitfield() != nil {
 			indices := indicesFromBitfield(peerData.MetaData.AttnetsBitfield())
-			if slices.Contains(indices, index) {
-				peers = append(peers, pid)
+			for _, idx := range indices {
+				if idx == index {
+					peers = append(peers, pid)
+					break
+				}
 			}
 		}
 	}
@@ -345,17 +345,17 @@ func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
 		return "", errors.Errorf("%s: %s", invalidRPCMessageType, msg)
 	}
 
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
 	// Check if the message is to be updated in fulu.
-	if epoch >= cfg.FuluForkEpoch {
+	if epoch >= beaconConfig.FuluForkEpoch {
 		if version, ok := fuluMapping[msg]; ok {
 			return protocolPrefix + msg + version, nil
 		}
 	}
 
 	// Check if the message is to be updated in altair.
-	if epoch >= cfg.AltairForkEpoch {
+	if epoch >= beaconConfig.AltairForkEpoch {
 		if version, ok := altairMapping[msg]; ok {
 			return protocolPrefix + msg + version, nil
 		}
@@ -14,7 +14,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
-	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v6/config/features"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -107,16 +106,12 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
 
-	validateConfig(cfg)
-
+	cfg = validateConfig(cfg)
 	privKey, err := privKey(cfg)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to generate p2p private key")
 	}
 
-	p2pMaxPeers.Set(float64(cfg.MaxPeers))
-	minimumPeersPerSubnet.Set(float64(flags.Get().MinimumPeersPerSubnet))
-
 	metaData, err := metaDataFromDB(ctx, cfg.DB)
 	if err != nil {
 		log.WithError(err).Error("Failed to create peer metadata")
@@ -514,18 +514,18 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
 //
 // return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
 func computeSubscribedSubnets(nodeID enode.ID, epoch primitives.Epoch) ([]uint64, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
 	if flags.Get().SubscribeToAllSubnets {
-		subnets := make([]uint64, 0, cfg.AttestationSubnetCount)
-		for i := range cfg.AttestationSubnetCount {
+		subnets := make([]uint64, 0, beaconConfig.AttestationSubnetCount)
+		for i := range beaconConfig.AttestationSubnetCount {
 			subnets = append(subnets, i)
 		}
 		return subnets, nil
 	}
 
-	subnets := make([]uint64, 0, cfg.SubnetsPerNode)
-	for i := range cfg.SubnetsPerNode {
+	subnets := make([]uint64, 0, beaconConfig.SubnetsPerNode)
+	for i := range beaconConfig.SubnetsPerNode {
 		sub, err := computeSubscribedSubnet(nodeID, epoch, i)
 		if err != nil {
 			return nil, errors.Wrap(err, "compute subscribed subnet")
@@ -524,12 +524,12 @@ func TestSubnetComputation(t *testing.T) {
 	require.NoError(t, err)
 
 	localNode := enode.NewLocalNode(db, convertedKey)
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
 	t.Run("standard", func(t *testing.T) {
 		retrievedSubnets, err := computeSubscribedSubnets(localNode.ID(), 1000)
 		require.NoError(t, err)
-		require.Equal(t, cfg.SubnetsPerNode, uint64(len(retrievedSubnets)))
+		require.Equal(t, beaconConfig.SubnetsPerNode, uint64(len(retrievedSubnets)))
 		require.Equal(t, retrievedSubnets[0]+1, retrievedSubnets[1])
 	})
 
@@ -541,8 +541,8 @@ func TestSubnetComputation(t *testing.T) {
 
 		retrievedSubnets, err := computeSubscribedSubnets(localNode.ID(), 1000)
 		require.NoError(t, err)
-		require.Equal(t, cfg.AttestationSubnetCount, uint64(len(retrievedSubnets)))
-		for i := range cfg.AttestationSubnetCount {
+		require.Equal(t, beaconConfig.AttestationSubnetCount, uint64(len(retrievedSubnets)))
+		for i := range beaconConfig.AttestationSubnetCount {
 			require.Equal(t, i, retrievedSubnets[i])
 		}
 	})
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"slices"
 	"strconv"
 	"strings"
 
@@ -389,9 +388,12 @@ func syncRewardsVals(
 	scIndices := make([]primitives.ValidatorIndex, 0, len(allScIndices))
 	scVals := make([]*precompute.Validator, 0, len(allScIndices))
 	for _, valIdx := range valIndices {
-		if slices.Contains(allScIndices, valIdx) {
-			scVals = append(scVals, allVals[valIdx])
-			scIndices = append(scIndices, valIdx)
+		for _, scIdx := range allScIndices {
+			if valIdx == scIdx {
+				scVals = append(scVals, allVals[valIdx])
+				scIndices = append(scIndices, valIdx)
+				break
+			}
 		}
 	}
@@ -90,10 +90,10 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
 // custodyGroupCount computes the custody group count based on the custody requirement,
 // the validators custody requirement, and whether the node is subscribed to all data subnets.
 func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
-	cfg := params.BeaconConfig()
+	beaconConfig := params.BeaconConfig()
 
 	if flags.Get().SubscribeAllDataSubnets {
-		return cfg.NumberOfCustodyGroups, nil
+		return beaconConfig.NumberOfCustodyGroups, nil
 	}
 
 	validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
@@ -101,7 +101,7 @@ func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
 		return 0, errors.Wrap(err, "validators custody requirement")
 	}
 
-	return max(cfg.CustodyRequirement, validatorsCustodyRequirement), nil
+	return max(beaconConfig.CustodyRequirement, validatorsCustodyRequirement), nil
 }
 
 // validatorsCustodyRequirements computes the custody requirements based on the
@@ -116,11 +116,11 @@ func withSubscribeAllDataSubnets(t *testing.T, fn func()) {
 
 func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.NumberOfCustodyGroups = 128
-	cfg.CustodyRequirement = 4
-	cfg.SamplesPerSlot = 8
-	params.OverrideBeaconConfig(cfg)
+	beaconConfig := params.BeaconConfig()
+	beaconConfig.NumberOfCustodyGroups = 128
+	beaconConfig.CustodyRequirement = 4
+	beaconConfig.SamplesPerSlot = 8
+	params.OverrideBeaconConfig(beaconConfig)
 
 	t.Run("Skip update when actual custody count >= target", func(t *testing.T) {
 		setup := setupCustodyTest(t, false)
@@ -159,7 +159,7 @@ func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
 			require.NoError(t, err)
 
 			const expectedSlot = primitives.Slot(100)
-			setup.assertCustodyInfo(t, expectedSlot, cfg.NumberOfCustodyGroups)
+			setup.assertCustodyInfo(t, expectedSlot, beaconConfig.NumberOfCustodyGroups)
 		})
 	})
 }
@@ -1,133 +0,0 @@ (deleted file)
# Gossip validation

**Note:** This design doc currently details some topics of gossip validation. Additional topics about gossip validation will be added in the future. When the document is complete we will remove this note.

## Table of Contents

- [State usage in gossip validation](#state-usage-in-gossip-validation)
  - [Beacon Blocks](#beacon-blocks)
  - [Head state is often good enough](#head-state-is-often-good-enough)
  - [Attestations](#attestations)
  - [Head is good again](#head-is-good-again)
  - [Other verifications and caches](#other-verifications-and-caches)
  - [Dropping expensive computations](#dropping-expensive-computations)

## State usage in gossip validation

The beacon node needs to verify different objects that arrive via gossipsub: beacon blocks, attestations, aggregated attestations, sync committee messages, data column sidecars, slashings, etc. Each of these objects requires a different validation path. What they all have in common is that, in order to be verified, each needs access to information from a beacon state. The question is: *what beacon state should we use?*
### Beacon Blocks

Before we get into implementation details, let us analyze some explicit checks that we need to perform. Suppose this is the forkchoice state of the node:

```
A <--- B <--- C <----------- E <-- .... <--- Y (<--- head of the chain)
               \
                ----- D
```

Here the block `A` is finalized, the block `B` is justified, and the head of the chain (from the point of view of this beacon node) is at `Y`. Suppose moreover that many slots and even epochs have passed between `D` and `Y`. The node now receives a block based on `D`:

```
D <--- Z
```

How can we validate that the proposer was indeed supposed to propose during this slot? Which state should we use to check the proposer index? If we take the post state of `Y`, which is this node's current head state, and advance it to the slot of `Z`, the proposer index may be different than if we take the post state of `D` and advance it accordingly. Now we put ourselves in the shoes of the proposer of `Z`. This validator may honestly not have seen the chain `E <-- ... <--- Y` and instead kept `D` all the time as head, simply processing slots. Eventually she finds herself in the position of proposing a block. She needs to base it on `D`. Hence the phrasing in the p2p spec:

```
- _[REJECT]_ The block is proposed by the expected `proposer_index` for the
  block's slot in the context of the current shuffling (defined by
  `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified
  against the expected shuffling, the block MAY be queued for later processing
  while proposers for the block's branch are calculated -- in such a case _do
  not_ `REJECT`, instead `IGNORE` this message.
```
### Head state is often good enough

So when is the head state good enough to validate the proposer index in the above case? The situation is slightly different pre-Fulu than post-Fulu with the proposer lookahead, but essentially what we need to verify is that there couldn't have been different shufflings when considering the post-state of `Y` and the post-state of `D` when advanced to the current slot.

Let `S` be `Z`'s slot and `E` be its epoch. The proposer shuffling for `Z` was determined at slot `32 (E - 1)`. Let `X` be the latest ancestor of `Z` with slot less than or equal to `32 (E - 1)`. If `X` is an ancestor of `C` (but not `C` itself), then the shuffling on the `Z` branch will be the same as on the `Y` branch for slot `S`. This, for example, is forced to happen if `Z`, `Y` and `D` are all in the same epoch `E`.

This takes care of the shuffling. However, the actual computation of the proposer index also requires the active validator indices, and this slice is determined at the latest epoch transition into `Z`'s epoch.

So a good algorithm is as follows when importing `Z` at slot `S` and epoch `E` (see the sketch after this list):
1. Check if the head state is at epoch `E`.
2. Check if the target checkpoint for `Y` in `E` equals the target checkpoint for `Z` at `E`.
   If both these points hold, then the head state already has the right proposer index.
3. If either 1) or 2) does not hold, then the checkpoint state on the branch of `Z` at `E` will hold the right proposer index for `Z`'s slot. Often this state is faster to get than that of `D`, since, being a checkpoint, it will be cached in case this checkpoint was canonical at some point.
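Below is a minimal, self-contained Go sketch of steps 1 and 2, using plain types rather than Prysm's actual state and forkchoice interfaces; the `Checkpoint` type and `canUseHeadState` helper are illustrative assumptions, not code from this repository.

```
package main

import "fmt"

// Checkpoint pairs an epoch with a block root, mirroring the spec's Checkpoint.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// canUseHeadState reports whether the head state already carries the right
// proposer shuffling for a block Z at epoch zEpoch (steps 1 and 2 above).
// If it returns false, step 3 applies: fetch the checkpoint state on Z's branch.
func canUseHeadState(headEpoch, zEpoch uint64, headTarget, zTarget Checkpoint) bool {
	if headEpoch != zEpoch { // step 1
		return false
	}
	return headTarget == zTarget // step 2
}

func main() {
	target := Checkpoint{Epoch: 10, Root: [32]byte{0x01}}
	fmt.Println(canUseHeadState(10, 10, target, target)) // true: head state suffices
	fmt.Println(canUseHeadState(9, 10, target, target))  // false: use Z's checkpoint state
}
```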
This takes care of most reorgs that happen on mainnet; the only problem occurs when deep forks are attempted (usually by struggling nodes building on some old block). In these cases, often the parent block is already finalized and therefore we don't even attempt to import those blocks. But this problem is exacerbated when the chain is not finalizing, because any such block will cause a fork and will fail the above checks to use the head state for the proposer index.
### Attestations

Something similar happens for attestations. When receiving an attestation

```
AttestationData(
    slot,
    index,
    beacon_block_root,
    source,
    target=Checkpoint(
        epoch: E,
        root: R,
    )
)
```

we make sure that we know the block with root `beacon_block_root`. We also check that the target checkpoint is consistent. In particular, we know that the beacon state of `R` (possibly advanced) at the slot `32 E` is at the same epoch as `slot` and has the right beacon committees to check whether the attester was supposed to attest at `slot` or not. Indeed, the ingredients to compute the beacon committee at the given slot are built out of the `randao_mixes` of epoch `E - 2` (it's `E - MIN_SEED_LOOKAHEAD - 1`) and the active validator indices of epoch `E`. Therefore any state that belongs to the same chain containing `R` and `beacon_block_root` and has epoch greater than or equal to `E - 2` will contain all the information necessary to validate the randao mix, and it needs to be exactly at `E` to validate the active validator indices. We thus always take the checkpoint state, that is, `R` advanced to `32 E`.
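As a concrete illustration of the epoch arithmetic above, here is a small sketch; the constants assume the mainnet values mentioned in the text (`MIN_SEED_LOOKAHEAD = 1`, 32 slots per epoch).

```
package main

import "fmt"

const (
	slotsPerEpoch    = 32
	minSeedLookahead = 1
)

func main() {
	e := uint64(100) // E, the attestation's target epoch
	// The committee seed comes from the randao mix of epoch E - MIN_SEED_LOOKAHEAD - 1.
	seedEpoch := e - minSeedLookahead - 1
	// The validating state is R advanced to the first slot of E.
	checkpointSlot := e * slotsPerEpoch
	fmt.Printf("seed epoch: %d, checkpoint slot: %d\n", seedEpoch, checkpointSlot) // 98, 3200
}
```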
### Head is good again

Now when is the head state good enough to validate an attestation as above? We already have the answer in the previous paragraph: the state needs to have the right active validator indices and the same randao mix. The mix is rarely a problem: it requires that the head state's checkpoint at `E - 2` coincides with the `beacon_block_root` checkpoint at `E - 2`. But the active validator indices are more likely to differ. The check here is very simple; if:
1. The head state's epoch is `E`, and
2. The head target at `E` has root `R`,

then the head state is good to validate this attestation. If the above two conditions fail, then the right state to validate it is `R` advanced to `32 E`, which is likely to be cached if this state happened to be a checkpoint in a canonical chain. A sketch of this selection rule follows.
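A hedged sketch of that selection rule, with hypothetical names and plain types standing in for Prysm's real interfaces:

```
package main

import "fmt"

type stateChoice string

const (
	useHeadState       stateChoice = "head state"
	useCheckpointState stateChoice = "checkpoint state (R advanced to 32 E)"
)

// pickValidationState applies conditions 1 and 2 above: use the head state
// only when it is in the target epoch and agrees on the target root.
func pickValidationState(headEpoch, targetEpoch uint64, headTargetRoot, targetRoot [32]byte) stateChoice {
	if headEpoch == targetEpoch && headTargetRoot == targetRoot {
		return useHeadState
	}
	return useCheckpointState
}

func main() {
	r := [32]byte{0xaa}
	fmt.Println(pickValidationState(5, 5, r, r))              // head state
	fmt.Println(pickValidationState(5, 5, r, [32]byte{0x01})) // checkpoint state
}
```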
### Other verifications and caches

So we have two types of verifications. The first relates to the randao mix, seeds, and the like, used to determine committees; these typically require a state from 1 or 2 epochs ago, where the seed was fixed. The second relates to active validator indices, which require a state at the start of the current epoch (or the epoch of the object being validated). This applies to all verifications: proposer index, beacon committee attester index, sync committee index, PTC index, etc.

Since computing active validator indices, proposer indices, beacon committees, etc. is very expensive, we keep several caches for these (more than we actually need, and some should be removed from our codebase). Since these caches are updated at the epoch transition, they are keyed by either the latest state root before the epoch transition or by the checkpoint root itself.

In addition, forkchoice keeps an O(1) cache for each block that gives the corresponding target checkpoint. So a general algorithm to perform verifications for arriving gossip elements is as follows:
```
Gossiped Element Arrives
           |
           v
┌──────────────────────────────────────────────────┐
│   Is element part of head state or descendant?   │
└──────────────────────────────────────────────────┘
          /                    \
        YES                     NO
         |                       |
         v                       v
┌──────────────────┐  ┌──────────────────────────────────────┐
│ Use Head State   │  │ Is target same as head's target for  │
│ (possibly        │  │ current epoch?                       │
│ advanced to same │  └──────────────────────────────────────┘
│ epoch as element)│            /              \
└──────────────────┘          YES               NO
                               |                 |
                               v                 v
                       ┌──────────────┐  ┌────────────────────┐
                       │ Use Head     │  │ Targets differ:    │
                       │ State        │  │ Get target state   │
                       └──────────────┘  └────────────────────┘
                                                  |
                                                  v
                                   ┌──────────────────────────────┐
                                   │   Is parent in same epoch?   │
                                   └──────────────────────────────┘
                                         /               \
                                       YES                NO
                                        |                  |
                                        v                  v
                       ┌──────────────────────────┐  ┌────────────────────────┐
                       │ Use forkchoice to get    │  │ Take parent state and  │
                       │ parent's target (equals  │  │ advance to current     │
                       │ gossiped element target).│  │ epoch (= target state).│
                       │ Use checkpoint cache.    │  └────────────────────────┘
                       └──────────────────────────┘
```
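The same decision tree, condensed into a hedged Go sketch; the boolean inputs and return strings are illustrative stand-ins for the real forkchoice and cache queries, not this repository's API.

```
package main

import "fmt"

// stateForValidation mirrors the diagram above: prefer the head state, fall
// back to the target (checkpoint) state only when targets differ.
func stateForValidation(isDescendantOfHead, sameTargetAsHead, parentInSameEpoch bool) string {
	if isDescendantOfHead {
		return "head state (possibly advanced to the element's epoch)"
	}
	if sameTargetAsHead {
		return "head state"
	}
	// Targets differ: fetch the target state.
	if parentInSameEpoch {
		return "parent's target via forkchoice, served from the checkpoint cache"
	}
	return "parent state advanced to the current epoch (the target state)"
}

func main() {
	fmt.Println(stateForValidation(false, false, true))
}
```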
### Dropping expensive computations

If the checkpoint cache misses (for example, if the checkpoint was never actually a checkpoint in our canonical chain), then regenerating the checkpoint state can be very expensive. In this case we should consider dropping or queueing the gossiped object. For attestations we have some heuristics for this, to avoid validating old, useless attestations. For beacon blocks this is not the case, and we will always try to import a block that we receive over gossip. This is dangerous in the case of non-finality, as it can lead to regeneration of very old states.
@@ -1366,16 +1366,16 @@ func TestFetchSidecars(t *testing.T) {
 	})
 
 	t.Run("Nominal", func(t *testing.T) {
-		cfg := params.BeaconConfig()
-		numberOfColumns := cfg.NumberOfColumns
-		samplesPerSlot := cfg.SamplesPerSlot
+		beaconConfig := params.BeaconConfig()
+		numberOfColumns := beaconConfig.NumberOfColumns
+		samplesPerSlot := beaconConfig.SamplesPerSlot
 
 		// Define "now" to be one epoch after genesis time + retention period.
 		genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
-		secondsPerSlot := cfg.SecondsPerSlot
-		slotsPerEpoch := cfg.SlotsPerEpoch
+		secondsPerSlot := beaconConfig.SecondsPerSlot
+		slotsPerEpoch := beaconConfig.SlotsPerEpoch
 		secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
-		retentionEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
+		retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
 		nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
 		now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
@@ -530,12 +530,12 @@ func TestOriginOutsideRetention(t *testing.T) {
func TestFetchOriginSidecars(t *testing.T) {
    ctx := t.Context()

-   cfg := params.BeaconConfig()
+   beaconConfig := params.BeaconConfig()
    genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
-   secondsPerSlot := cfg.SecondsPerSlot
-   slotsPerEpoch := cfg.SlotsPerEpoch
+   secondsPerSlot := beaconConfig.SecondsPerSlot
+   slotsPerEpoch := beaconConfig.SlotsPerEpoch
    secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
-   retentionEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
+   retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest

    genesisValidatorRoot := [fieldparams.RootLength]byte{}

@@ -286,7 +286,6 @@ func (s *Service) updateMetrics() {
        topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(formattedTopic))))
    }

-   subscribedTopicPeerCount.Reset()
    for _, topic := range s.cfg.p2p.PubSub().GetTopics() {
        subscribedTopicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.p2p.PubSub().ListPeers(topic))))
    }

@@ -5,7 +5,6 @@ import (
    "context"
    "encoding/hex"
    "fmt"
-   "slices"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"

@@ -383,8 +382,10 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any

    // Skip if the attestation/aggregate from the same validator already exists in
    // the pending queue.
-   if slices.ContainsFunc(s.blkRootToPendingAtts[root], isEqual) {
-       return
+   for _, a := range s.blkRootToPendingAtts[root] {
+       if isEqual(a) {
+           return
+       }
    }

    pendingAttCount.Inc()

@@ -10,7 +10,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
-   fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"

@@ -59,17 +58,6 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
        return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
    }

-   // Extract all needed roots.
-   roots := make([][fieldparams.RootLength]byte, 0, len(blobIdents))
-   for _, ident := range blobIdents {
-       root := bytesutil.ToBytes32(ident.BlockRoot)
-       roots = append(roots, root)
-   }

-   // Filter all available roots in block storage.
-   availableRoots := s.cfg.beaconDB.AvailableBlocks(ctx, roots)

    // Serve each requested blob sidecar.
    for i := range blobIdents {
        if err := ctx.Err(); err != nil {
            closeStream(stream, log)

@@ -81,15 +69,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
            <-ticker.C
        }
        s.rateLimiter.add(stream, 1)

        root, idx := bytesutil.ToBytes32(blobIdents[i].BlockRoot), blobIdents[i].Index

-       // Do not serve a blob sidecar if the corresponding block is not available.
-       if !availableRoots[root] {
-           log.Trace("Peer requested blob sidecar by root but corresponding block not found in db")
-           continue
-       }

        sc, err := s.cfg.blobStorage.Get(root, idx)
        if err != nil {
            log := log.WithFields(logrus.Fields{

@@ -133,19 +113,19 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}

func validateBlobByRootRequest(blobIdents types.BlobSidecarsByRootReq, slot primitives.Slot) error {
-   cfg := params.BeaconConfig()
+   beaconConfig := params.BeaconConfig()
    epoch := slots.ToEpoch(slot)
    blobIdentCount := uint64(len(blobIdents))

-   if epoch >= cfg.ElectraForkEpoch {
-       if blobIdentCount > cfg.MaxRequestBlobSidecarsElectra {
+   if epoch >= beaconConfig.ElectraForkEpoch {
+       if blobIdentCount > beaconConfig.MaxRequestBlobSidecarsElectra {
            return types.ErrMaxBlobReqExceeded
        }

        return nil
    }

-   if blobIdentCount > cfg.MaxRequestBlobSidecars {
+   if blobIdentCount > beaconConfig.MaxRequestBlobSidecars {
        return types.ErrMaxBlobReqExceeded
    }

@@ -38,8 +38,8 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
    defer cancel()

    SetRPCStreamDeadlines(stream)
-   cfg := params.BeaconConfig()
-   maxRequestDataColumnSidecars := cfg.MaxRequestDataColumnSidecars
+   beaconConfig := params.BeaconConfig()
+   maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars
    remotePeer := stream.Conn().RemotePeer()

    log := log.WithFields(logrus.Fields{

@@ -102,7 +102,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i

        // Once the quota is reached, we're done serving the request.
        if maxRequestDataColumnSidecars == 0 {
-           log.WithField("initialQuota", cfg.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
+           log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
            break
        }
    }

@@ -31,9 +31,9 @@ import (

func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
    params.SetupTestConfigCleanup(t)
-   cfg := params.BeaconConfig()
-   cfg.FuluForkEpoch = 0
-   params.OverrideBeaconConfig(cfg)
+   beaconConfig := params.BeaconConfig()
+   beaconConfig.FuluForkEpoch = 0
+   params.OverrideBeaconConfig(beaconConfig)
    params.BeaconConfig().InitializeForkSchedule()
    ctx := context.Background()
    t.Run("wrong message type", func(t *testing.T) {

@@ -56,6 +56,18 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
        return errors.Wrap(err, "validate data columns by root request")
    }

+   requestedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]uint64)
+   for _, columnIdent := range requestedColumnIdents {
+       var root [fieldparams.RootLength]byte
+       copy(root[:], columnIdent.BlockRoot)
+       requestedColumnsByRoot[root] = append(requestedColumnsByRoot[root], columnIdent.Columns...)
+   }

+   // Sort by column index for each root.
+   for _, columns := range requestedColumnsByRoot {
+       slices.Sort(columns)
+   }

    // Compute the oldest slot we'll allow a peer to request, based on the current slot.
    minReqSlot, err := dataColumnsRPCMinValidSlot(s.cfg.clock.CurrentSlot())
    if err != nil {

@@ -72,12 +84,6 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
    }

    if log.Logger.Level >= logrus.TraceLevel {
-       requestedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]uint64)
-       for _, ident := range requestedColumnIdents {
-           root := bytesutil.ToBytes32(ident.BlockRoot)
-           requestedColumnsByRoot[root] = append(requestedColumnsByRoot[root], ident.Columns...)
-       }

        // We optimistially assume the peer requests the same set of columns for all roots,
        // pre-sizing the map accordingly.
        requestedRootsByColumnSet := make(map[string][]string, 1)

@@ -90,17 +96,6 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
        log.WithField("requested", requestedRootsByColumnSet).Trace("Serving data column sidecars by root")
    }

-   // Extract all requested roots.
-   roots := make([][fieldparams.RootLength]byte, 0, len(requestedColumnIdents))
-   for _, ident := range requestedColumnIdents {
-       root := bytesutil.ToBytes32(ident.BlockRoot)
-       roots = append(roots, root)
-   }

-   // Filter all available roots in block storage.
-   availableRoots := s.cfg.beaconDB.AvailableBlocks(ctx, roots)

    // Serve each requested data column sidecar.
    count := 0
    for _, ident := range requestedColumnIdents {
        if err := ctx.Err(); err != nil {

@@ -122,12 +117,6 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int

        s.rateLimiter.add(stream, int64(len(columns)))

-       // Do not serve a blob sidecar if the corresponding block is not available.
-       if !availableRoots[root] {
-           log.Trace("Peer requested blob sidecar by root but corresponding block not found in db")
-           continue
-       }

        // Retrieve the requested sidecars from the store.
        verifiedRODataColumns, err := s.cfg.dataColumnStorage.Get(root, columns)
        if err != nil {

@@ -174,9 +163,9 @@ func dataColumnsRPCMinValidSlot(currentSlot primitives.Slot) (primitives.Slot, e
        return primitives.Slot(math.MaxUint64), nil
    }

-   cfg := params.BeaconConfig()
-   minReqEpochs := cfg.MinEpochsForDataColumnSidecarsRequest
-   minStartEpoch := cfg.FuluForkEpoch
+   beaconConfig := params.BeaconConfig()
+   minReqEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
+   minStartEpoch := beaconConfig.FuluForkEpoch

    currEpoch := slots.ToEpoch(currentSlot)
    if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStartEpoch {

@@ -10,7 +10,6 @@ import (

    chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
-   testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"

@@ -20,7 +19,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
-   "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/libp2p/go-libp2p/core/network"

@@ -30,9 +28,9 @@ import (

func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
    params.SetupTestConfigCleanup(t)
-   cfg := params.BeaconConfig()
-   cfg.FuluForkEpoch = 0
-   params.OverrideBeaconConfig(cfg)
+   beaconConfig := params.BeaconConfig()
+   beaconConfig.FuluForkEpoch = 0
+   params.OverrideBeaconConfig(beaconConfig)
    params.BeaconConfig().InitializeForkSchedule()
    ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
    require.NoError(t, err)

@@ -45,9 +43,9 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {

    t.Run("invalid request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
-       cfg := params.BeaconConfig()
-       cfg.MaxRequestDataColumnSidecars = 1
-       params.OverrideBeaconConfig(cfg)
+       beaconConfig := params.BeaconConfig()
+       beaconConfig.MaxRequestDataColumnSidecars = 1
+       params.OverrideBeaconConfig(beaconConfig)

        localP2P := p2ptest.NewTestP2P(t)
        service := &Service{cfg: &config{p2p: localP2P}}

@@ -98,54 +96,30 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
        }()

        params.SetupTestConfigCleanup(t)
-       cfg := params.BeaconConfig()
-       cfg.FuluForkEpoch = 1
-       params.OverrideBeaconConfig(cfg)
+       beaconConfig := params.BeaconConfig()
+       beaconConfig.FuluForkEpoch = 1
+       params.OverrideBeaconConfig(beaconConfig)

        localP2P := p2ptest.NewTestP2P(t)
        clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

-       _, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(
-           t,
-           []util.DataColumnParam{
-               {Slot: 10, Index: 1}, {Slot: 10, Index: 2}, {Slot: 10, Index: 3},
-               {Slot: 40, Index: 4}, {Slot: 40, Index: 6},
-               {Slot: 45, Index: 7}, {Slot: 45, Index: 8}, {Slot: 45, Index: 9},
-               {Slot: 46, Index: 10}, // Corresponding block won't be saved in DB
-           },
-       )
-
-       dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-       err := dataColumnStorage.Save(verifiedRODataColumns)
-       require.NoError(t, err)
-
-       beaconDB := testDB.SetupDB(t)
-       indices := [...]int{0, 3, 5}
-
-       roBlocks := make([]blocks.ROBlock, 0, len(indices))
-       for _, i := range indices {
-           blockPb := util.NewBeaconBlock()
-
-           signedBeaconBlock, err := blocks.NewSignedBeaconBlock(blockPb)
-           require.NoError(t, err)
-
-           // Here the block root has to match the sidecar's block root.
-           // (However, the block root does not match the actual root of the block, but we don't care for this test.)
-           roBlock, err := blocks.NewROBlockWithRoot(signedBeaconBlock, verifiedRODataColumns[i].BlockRoot())
-           require.NoError(t, err)
-
-           roBlocks = append(roBlocks, roBlock)
+       params := []util.DataColumnParam{
+           {Slot: 10, Index: 1}, {Slot: 10, Index: 2}, {Slot: 10, Index: 3},
+           {Slot: 40, Index: 4}, {Slot: 40, Index: 6},
+           {Slot: 45, Index: 7}, {Slot: 45, Index: 8}, {Slot: 45, Index: 9},
        }

-       err = beaconDB.SaveROBlocks(ctx, roBlocks, false /*cache*/)
+       _, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
+
+       storage := filesystem.NewEphemeralDataColumnStorage(t)
+       err := storage.Save(verifiedRODataColumns)
        require.NoError(t, err)

        service := &Service{
            cfg: &config{
                p2p:               localP2P,
-               beaconDB:          beaconDB,
                clock:             clock,
-               dataColumnStorage: dataColumnStorage,
+               dataColumnStorage: storage,
                chain:             &chainMock.ChainService{},
            },
            rateLimiter: newRateLimiter(localP2P),

@@ -160,7 +134,6 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
        root0 := verifiedRODataColumns[0].BlockRoot()
        root3 := verifiedRODataColumns[3].BlockRoot()
        root5 := verifiedRODataColumns[5].BlockRoot()
-       root8 := verifiedRODataColumns[8].BlockRoot()

        remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
            defer wg.Done()

@@ -174,22 +147,22 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
                    break
                }

-               assert.NoError(t, err)
+               require.NoError(t, err)
                sidecars = append(sidecars, sidecar)
            }

-           assert.Equal(t, 5, len(sidecars))
-           assert.Equal(t, root3, sidecars[0].BlockRoot())
-           assert.Equal(t, root3, sidecars[1].BlockRoot())
-           assert.Equal(t, root5, sidecars[2].BlockRoot())
-           assert.Equal(t, root5, sidecars[3].BlockRoot())
-           assert.Equal(t, root5, sidecars[4].BlockRoot())
+           require.Equal(t, 5, len(sidecars))
+           require.Equal(t, root3, sidecars[0].BlockRoot())
+           require.Equal(t, root3, sidecars[1].BlockRoot())
+           require.Equal(t, root5, sidecars[2].BlockRoot())
+           require.Equal(t, root5, sidecars[3].BlockRoot())
+           require.Equal(t, root5, sidecars[4].BlockRoot())

-           assert.Equal(t, uint64(4), sidecars[0].Index)
-           assert.Equal(t, uint64(6), sidecars[1].Index)
-           assert.Equal(t, uint64(7), sidecars[2].Index)
-           assert.Equal(t, uint64(8), sidecars[3].Index)
-           assert.Equal(t, uint64(9), sidecars[4].Index)
+           require.Equal(t, uint64(4), sidecars[0].Index)
+           require.Equal(t, uint64(6), sidecars[1].Index)
+           require.Equal(t, uint64(7), sidecars[2].Index)
+           require.Equal(t, uint64(8), sidecars[3].Index)
+           require.Equal(t, uint64(9), sidecars[4].Index)
        })

        localP2P.Connect(remoteP2P)

@@ -209,10 +182,6 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
                BlockRoot: root5[:],
                Columns:   []uint64{7, 8, 9},
            },
-           {
-               BlockRoot: root8[:],
-               Columns:   []uint64{10},
-           },
        }

        err = service.dataColumnSidecarByRootRPCHandler(ctx, msg, stream)

@@ -465,8 +465,8 @@ func SendDataColumnSidecarsByRangeRequest(
        return nil, nil
    }

-   cfg := params.BeaconConfig()
-   numberOfColumns := cfg.NumberOfColumns
+   beaconConfig := params.BeaconConfig()
+   numberOfColumns := beaconConfig.NumberOfColumns
    maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars

    // Check if we do not request too many sidecars.

@@ -889,9 +889,9 @@ func TestErrInvalidFetchedDataDistinction(t *testing.T) {

func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
    params.SetupTestConfigCleanup(t)
-   cfg := params.BeaconConfig()
-   cfg.FuluForkEpoch = 0
-   params.OverrideBeaconConfig(cfg)
+   beaconConfig := params.BeaconConfig()
+   beaconConfig.FuluForkEpoch = 0
+   params.OverrideBeaconConfig(beaconConfig)
    params.BeaconConfig().InitializeForkSchedule()
    ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
    require.NoError(t, err)

@@ -923,9 +923,9 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {

    t.Run("too many columns in request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
-       cfg := params.BeaconConfig()
-       cfg.MaxRequestDataColumnSidecars = 0
-       params.OverrideBeaconConfig(cfg)
+       beaconConfig := params.BeaconConfig()
+       beaconConfig.MaxRequestDataColumnSidecars = 0
+       params.OverrideBeaconConfig(beaconConfig)

        request := &ethpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
        _, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)

@@ -1193,9 +1193,9 @@ func TestIsSidecarIndexRequested(t *testing.T) {

func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
    params.SetupTestConfigCleanup(t)
-   cfg := params.BeaconConfig()
-   cfg.FuluForkEpoch = 0
-   params.OverrideBeaconConfig(cfg)
+   beaconConfig := params.BeaconConfig()
+   beaconConfig.FuluForkEpoch = 0
+   params.OverrideBeaconConfig(beaconConfig)
    params.BeaconConfig().InitializeForkSchedule()
    ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
    require.NoError(t, err)

@@ -1223,9 +1223,9 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {

    t.Run("too many columns in request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
-       cfg := params.BeaconConfig()
-       cfg.MaxRequestDataColumnSidecars = 4
-       params.OverrideBeaconConfig(cfg)
+       beaconConfig := params.BeaconConfig()
+       beaconConfig.MaxRequestDataColumnSidecars = 4
+       params.OverrideBeaconConfig(beaconConfig)

        request := p2ptypes.DataColumnsByRootIdentifiers{
            {Columns: []uint64{1, 2, 3}},

@@ -445,7 +445,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
        custodyGroupCount = uint64(4)
    )

-   cfg := params.BeaconConfig()
+   beaconConfig := params.BeaconConfig()
    ctx := t.Context()

    testCases := []struct {

@@ -456,7 +456,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
    }{
        {
            name:          "before fulu",
-           fuluForkEpoch: cfg.FarFutureEpoch,
+           fuluForkEpoch: beaconConfig.FarFutureEpoch,
            topic:         "/eth2/beacon_chain/req/status/1/ssz_snappy",
            streamHandler: func(service *Service, stream network.Stream, genesisState beaconState.BeaconState, beaconRoot, headRoot, finalizedRoot []byte) {
                out := &ethpb.Status{}

@@ -695,10 +695,10 @@ func (s *Service) dataColumnSubnetIndices(primitives.Slot) map[uint64]bool {
// the validators custody requirement, and whether the node is subscribed to all data subnets.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
func (s *Service) samplingSize() (uint64, error) {
-   cfg := params.BeaconConfig()
+   beaconConfig := params.BeaconConfig()

    if flags.Get().SubscribeAllDataSubnets {
-       return cfg.DataColumnSidecarSubnetCount, nil
+       return beaconConfig.DataColumnSidecarSubnetCount, nil
    }

    // Compute the validators custody requirement.

@@ -712,7 +712,7 @@ func (s *Service) samplingSize() (uint64, error) {
        return 0, errors.Wrap(err, "custody group count")
    }

-   return max(cfg.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
+   return max(beaconConfig.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
}

func (s *Service) persistentAndAggregatorSubnetIndices(currentSlot primitives.Slot) map[uint64]bool {

@@ -3,7 +3,6 @@ package sync
import (
    "context"
    "fmt"
-   "slices"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"

@@ -291,8 +290,11 @@ func (s *Service) validateIndexInCommittee(ctx context.Context, a ethpb.Att, val
    }

    var withinCommittee bool
-   if slices.Contains(committee, validatorIndex) {
-       withinCommittee = true
+   for _, i := range committee {
+       if validatorIndex == i {
+           withinCommittee = true
+           break
+       }
    }
    if !withinCommittee {
        return pubsub.ValidationReject, fmt.Errorf("validator index %d is not within the committee: %v",

@@ -5,7 +5,6 @@ import (
    "encoding/binary"
    "fmt"
    "reflect"
-   "slices"
    "strings"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"

@@ -337,7 +336,13 @@ func validateAttestingIndex(

    // _[REJECT]_ The attester is a member of the committee -- i.e.
    // `attestation.attester_index in get_beacon_committee(state, attestation.data.slot, index)`.
-   inCommittee := slices.Contains(committee, attestingIndex)
+   inCommittee := false
+   for _, ix := range committee {
+       if attestingIndex == ix {
+           inCommittee = true
+           break
+       }
+   }
    if !inCommittee {
        return pubsub.ValidationReject, errors.New("attester is not a member of the committee")
    }

@@ -0,0 +1,3 @@
### Fixed

- Fix recoverStateSummary to persist state summaries in stateSummaryBucket instead of stateBucket (#15896).
@@ -1,3 +0,0 @@
### Added

- Added GeneralizedIndicesFromPath function to calculate the GIs for a given sszInfo object and a PathElement
changelog/james-prysm_v6.1.3.md (new file)
@@ -0,0 +1,3 @@
### Ignored

- Changelog entries for v6.1.3 through v6.1.2
@@ -1,3 +0,0 @@
### Ignored

- Changelog entries for v6.1.4 through v6.1.3
changelog/kasey_clear-db-rm-genesis.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- Delete the genesis state file when --clear-db / --force-clear-db is specified.
@@ -1,2 +0,0 @@
### Ignored
- Fix bug with layout detection when readdirnames returns io.EOF.
changelog/manu-advertise-atts.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- Correctly advertise (in ENR and beacon API) attestation subnets when using `--subscribe-all-subnets`.
@@ -1,3 +0,0 @@
### Fixed
- `blobSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available.
- `dataColumnSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available.
@@ -1,2 +0,0 @@
### Changed
- Update go-netroute to `v0.3.0`
@@ -1,4 +0,0 @@
### Added
- Metrics: Add count of peers per direction and type (inbound/outbound), (TCP/QUIC).
- `p2p_subscribed_topic_peer_total`: Reset to avoid dangling values.
- Add `p2p_minimum_peers_per_subnet` metric.
changelog/manu-number-custody-groups.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- `updateCustodyInfoInDB`: Use `NumberOfCustodyGroups` instead of `NumberOfColumns`.
changelog/manu-random-peer.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- `randomPeer`: Return if the context is cancelled when waiting for peers.
changelog/manu-read-columns-from-disk-error.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- Improve error message when the byte count read from disk when reading a data column sidecars is lower than expected. (Mostly, because the file is truncated.)
changelog/manu-verify-data-column-sidecar-kzg-proofs.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- `VerifyDataColumnsSidecarKZGProofs`: Check if sizes match.
@@ -1,2 +0,0 @@
### Fixed
- Fix incorrect version used when sending attestation version in Fulu
changelog/muzry_fix_extract_metadata_file.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- Fixed metadata extraction on Windows by correctly splitting file paths
@@ -1,3 +0,0 @@
### Added

- Add `gossip_validation.md` as design doc for state usage for gossip validation.
changelog/potuz_hdiff_diff_type.md (new file)
@@ -0,0 +1,3 @@
### Added

- Add native state diff type and marshalling functions
@@ -1,3 +0,0 @@
### Ignored

- Use slices.Contains to simplify code
changelog/satushh-plus-one-bug.md (new file)
@@ -0,0 +1,4 @@
### Fixed

- corrected defaultRetentionEpochs in pruner
- epoch aligned pruning: pruning should be epoch-wise. No fractional epoch pruning.
changelog/satushh-update-easlot-pruning.md (new file)
@@ -0,0 +1,3 @@
### Added

- Update the earliest available slot after pruning operations in beacon chain database pruner. This ensures the P2P layer accurately knows which historical data is available after pruning, preventing nodes from advertising or attempting to serve data that has been pruned.
changelog/syjn99_ssz-ql-endpoints.md (new file)
@@ -0,0 +1,3 @@
### Added

- SSZ-QL: Add endpoints for `BeaconState`/`BeaconBlock`.
changelog/ttsao_fix-sync-aggregate-state.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Sync committee uses correct state to calculate position
changelog/ttsao_fix-sync-committee-subnet-indices.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fix sync committee subscription to use subnet indices instead of committee indices
@@ -1,3 +0,0 @@
### Ignored

- Use SlotTicker with offset instead of time.Ticker for attestation pool pruning to avoid conflicts with slot boundary operations
@@ -2,10 +2,8 @@ package storage

import (
    "fmt"
-   "io"
    "os"
    "path"
-   "slices"
    "strings"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"

@@ -57,8 +55,10 @@ func layoutFlagUsage() string {
}

func validateLayoutFlag(_ *cli.Context, v string) error {
-   if slices.Contains(filesystem.LayoutNames, v) {
-       return nil
+   for _, l := range filesystem.LayoutNames {
+       if v == l {
+           return nil
+       }
    }
    return errors.Errorf("invalid value '%s' for flag --%s, %s", v, BlobStorageLayout.Name, layoutOptions())
}

@@ -140,10 +140,6 @@ func detectLayout(dir string, c stringFlagGetter) (string, error) {
    // amount of wiggle room to be confident that we'll likely see a by-root director if one exists.
    entries, err := base.Readdirnames(16)
    if err != nil {
-       // We can get this error if the directory exists and is empty
-       if errors.Is(err, io.EOF) {
-           return filesystem.LayoutNameByEpoch, nil
-       }
        return "", errors.Wrap(err, "reading blob storage directory")
    }
    for _, entry := range entries {

@@ -192,13 +192,6 @@ func TestDetectLayout(t *testing.T) {
            },
            expectedErr: syscall.ENOTDIR,
        },
-       {
-           name: "empty blobs dir",
-           setup: func(t *testing.T, dir string) {
-               require.NoError(t, os.MkdirAll(dir, 0o755))
-           },
-           expected: filesystem.LayoutNameByEpoch,
-       },
    }

    for _, tc := range cases {

@@ -4,7 +4,6 @@ package flags

import (
    "fmt"
-   "slices"
    "strings"

    "github.com/urfave/cli/v2"

@@ -20,9 +19,11 @@ type EnumValue struct {
}

func (e *EnumValue) Set(value string) error {
-   if slices.Contains(e.Enum, value) {
-       *e.Destination = value
-       return nil
+   for _, enum := range e.Enum {
+       if enum == value {
+           *e.Destination = value
+           return nil
+       }
    }

    return fmt.Errorf("allowed values are %s", strings.Join(e.Enum, ", "))

@@ -268,16 +268,20 @@ func (s *Slice[V]) At(obj Identifiable, index uint64) (V, error) {
            return s.sharedItems[index], nil
        }
        for _, v := range ind.Values {
-           if slices.Contains(v.ids, obj.Id()) {
-               return v.val, nil
+           for _, id := range v.ids {
+               if id == obj.Id() {
+                   return v.val, nil
+               }
            }
        }
        return s.sharedItems[index], nil
    } else {
        item := s.appendedItems[index-uint64(len(s.sharedItems))]
        for _, v := range item.Values {
-           if slices.Contains(v.ids, obj.Id()) {
-               return v.val, nil
+           for _, id := range v.ids {
+               if id == obj.Id() {
+                   return v.val, nil
+               }
            }
        }
        var def V

deps.bzl
@@ -2023,8 +2023,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_libp2p_go_netroute",
        importpath = "github.com/libp2p/go-netroute",
-       sum = "h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=",
-       version = "v0.3.0",
+       sum = "h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=",
+       version = "v0.2.2",
    )
    go_repository(
        name = "com_github_libp2p_go_reuseport",

@@ -11,10 +11,7 @@ import (
    "github.com/prysmaticlabs/go-bitfield"
)

-const (
-   BitsPerChunk  = 256
-   BytesPerChunk = 32
-)
+const bytesPerChunk = 32

// BitlistRoot returns the mix in length of a bitwise Merkleized bitfield.
func BitlistRoot(bfield bitfield.Bitfield, maxCapacity uint64) ([32]byte, error) {

@@ -57,14 +54,14 @@ func BitwiseMerkleize(chunks [][32]byte, count, limit uint64) ([32]byte, error)
}

// PackByChunk a given byte array's final chunk with zeroes if needed.
-func PackByChunk(serializedItems [][]byte) ([][BytesPerChunk]byte, error) {
-   var emptyChunk [BytesPerChunk]byte
+func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
+   var emptyChunk [bytesPerChunk]byte
    // If there are no items, we return an empty chunk.
    if len(serializedItems) == 0 {
-       return [][BytesPerChunk]byte{emptyChunk}, nil
-   } else if len(serializedItems[0]) == BytesPerChunk {
+       return [][bytesPerChunk]byte{emptyChunk}, nil
+   } else if len(serializedItems[0]) == bytesPerChunk {
        // If each item has exactly BYTES_PER_CHUNK length, we return the list of serialized items.
-       chunks := make([][BytesPerChunk]byte, 0, len(serializedItems))
+       chunks := make([][bytesPerChunk]byte, 0, len(serializedItems))
        for _, c := range serializedItems {
            chunks = append(chunks, bytesutil.ToBytes32(c))
        }

@@ -78,12 +75,12 @@ func PackByChunk(serializedItems [][]byte) ([][BytesPerChunk]byte, error) {
    // If all our serialized item slices are length zero, we
    // exit early.
    if len(orderedItems) == 0 {
-       return [][BytesPerChunk]byte{emptyChunk}, nil
+       return [][bytesPerChunk]byte{emptyChunk}, nil
    }
    numItems := len(orderedItems)
-   var chunks [][BytesPerChunk]byte
-   for i := 0; i < numItems; i += BytesPerChunk {
-       j := i + BytesPerChunk
+   var chunks [][bytesPerChunk]byte
+   for i := 0; i < numItems; i += bytesPerChunk {
+       j := i + bytesPerChunk
        // We create our upper bound index of the chunk, if it is greater than numItems,
        // we set it as numItems itself.
        if j > numItems {

@@ -92,7 +89,7 @@ func PackByChunk(serializedItems [][]byte) ([][BytesPerChunk]byte, error) {
        // We create chunks from the list of items based on the
        // indices determined above.
        // Right-pad the last chunk with zero bytes if it does not
-       // have length BytesPerChunk from the helper.
+       // have length bytesPerChunk from the helper.
        // The ToBytes32 helper allocates a 32-byte array, before
        // copying the ordered items in. This ensures that even if
        // the last chunk is != 32 in length, we will right-pad it with

@@ -7,7 +7,6 @@ go_library(
        "bitlist.go",
        "bitvector.go",
        "container.go",
-       "generalized_index.go",
        "list.go",
        "path.go",
        "query.go",

@@ -19,16 +18,12 @@ go_library(
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/encoding/ssz/query",
    visibility = ["//visibility:public"],
-   deps = [
-       "//encoding/ssz:go_default_library",
-       "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
-   ],
+   deps = ["@com_github_prysmaticlabs_go_bitfield//:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = [
-       "generalized_index_test.go",
        "path_test.go",
        "query_test.go",
        "tag_parser_test.go",

@@ -1,321 +0,0 @@
package query

import (
    "errors"
    "fmt"

    "github.com/OffchainLabs/prysm/v6/encoding/ssz"
)

const listBaseIndex = 2

// GetGeneralizedIndexFromPath calculates the generalized index for a given path.
// To calculate the generalized index, two inputs are needed:
// 1. The sszInfo of the root object, to be able to navigate the SSZ structure
// 2. The path to the field (e.g., "field_a.field_b[3].field_c")
// It walks the path step by step, updating the generalized index at each step.
func GetGeneralizedIndexFromPath(info *SszInfo, path []PathElement) (uint64, error) {
    if info == nil {
        return 0, errors.New("SszInfo is nil")
    }

    // If path is empty, no generalized index can be computed.
    if len(path) == 0 {
        return 0, errors.New("cannot compute generalized index for an empty path")
    }

    // Starting from the root generalized index
    currentIndex := uint64(1)
    currentInfo := info

    for _, pathElement := range path {
        element := pathElement

        // Check that we are in a container to access fields
        if currentInfo.sszType != Container {
            return 0, fmt.Errorf("indexing requires a container field step first, got %s", currentInfo.sszType)
        }

        // Retrieve the field position and SSZInfo for the field in the current container
        fieldPos, fieldSsz, err := getContainerFieldByName(currentInfo, element.Name)
        if err != nil {
            return 0, fmt.Errorf("container field %s not found: %w", element.Name, err)
        }

        // Get the chunk count for the current container
        chunkCount, err := getChunkCount(currentInfo)
        if err != nil {
            return 0, fmt.Errorf("chunk count error: %w", err)
        }

        // Update the generalized index to point to the specified field
        currentIndex = currentIndex*nextPowerOfTwo(chunkCount) + fieldPos
        currentInfo = fieldSsz

        // Check if a path element is a length field
        if element.Length {
            currentInfo, currentIndex, err = calculateLengthGeneralizedIndex(fieldSsz, element, currentIndex)
            if err != nil {
                return 0, fmt.Errorf("length calculation error: %w", err)
            }
            continue
        }

        if element.Index == nil {
            continue
        }

        switch fieldSsz.sszType {
        case List:
            currentInfo, currentIndex, err = calculateListGeneralizedIndex(fieldSsz, element, currentIndex)
            if err != nil {
                return 0, fmt.Errorf("list calculation error: %w", err)
            }

        case Vector:
            currentInfo, currentIndex, err = calculateVectorGeneralizedIndex(fieldSsz, element, currentIndex)
            if err != nil {
                return 0, fmt.Errorf("vector calculation error: %w", err)
            }

        case Bitlist:
            currentInfo, currentIndex, err = calculateBitlistGeneralizedIndex(fieldSsz, element, currentIndex)
            if err != nil {
                return 0, fmt.Errorf("bitlist calculation error: %w", err)
            }

        case Bitvector:
            currentInfo, currentIndex, err = calculateBitvectorGeneralizedIndex(fieldSsz, element, currentIndex)
            if err != nil {
                return 0, fmt.Errorf("bitvector calculation error: %w", err)
            }

        default:
            return 0, fmt.Errorf("indexing not supported for type %s", fieldSsz.sszType)
        }

    }

    return currentIndex, nil
}

// getContainerFieldByName finds a container field by its name
// and returns its index and SSZInfo.
func getContainerFieldByName(info *SszInfo, fieldName string) (uint64, *SszInfo, error) {
    containerInfo, err := info.ContainerInfo()
    if err != nil {
        return 0, nil, err
    }

    for index, name := range containerInfo.order {
        if name == fieldName {
            fieldInfo := containerInfo.fields[name]
            if fieldInfo == nil || fieldInfo.sszInfo == nil {
                return 0, nil, fmt.Errorf("field %s has no ssz info", name)
            }
            return uint64(index), fieldInfo.sszInfo, nil
        }
    }

    return 0, nil, fmt.Errorf("field %s not found", fieldName)
}

// Helpers for Generalized Index calculation per type

// calculateLengthGeneralizedIndex calculates the generalized index for a length field.
// note: length fields are only valid for List and Bitlist types. Multi-dimensional arrays are not supported.
// Returns:
// - its descendant SSZInfo (length field i.e. uint64)
// - its generalized index.
func calculateLengthGeneralizedIndex(fieldSsz *SszInfo, element PathElement, parentIndex uint64) (*SszInfo, uint64, error) {
    if element.Index != nil {
        return nil, 0, fmt.Errorf("len() is not supported for multi-dimensional arrays")
    }
    // Length field is only valid for List and Bitlist types
    if fieldSsz.sszType != List && fieldSsz.sszType != Bitlist {
        return nil, 0, fmt.Errorf("len() is only supported for List and Bitlist types, got %s", fieldSsz.sszType)
    }
    // Length is a uint64 per SSZ spec
    currentInfo := &SszInfo{sszType: Uint64}
    lengthIndex := parentIndex*2 + 1
    return currentInfo, lengthIndex, nil
}

// calculateListGeneralizedIndex calculates the generalized index for a list element.
// Returns:
// - its descendant SSZInfo (list element)
// - its generalized index.
func calculateListGeneralizedIndex(fieldSsz *SszInfo, element PathElement, parentIndex uint64) (*SszInfo, uint64, error) {
    li, err := fieldSsz.ListInfo()
    if err != nil {
        return nil, 0, fmt.Errorf("list info error: %w", err)
    }
    elem, err := li.Element()
    if err != nil {
        return nil, 0, fmt.Errorf("list element error: %w", err)
    }
    if *element.Index >= li.Limit() {
        return nil, 0, fmt.Errorf("index %d out of bounds for list with limit %d", *element.Index, li.Limit())
    }
    // Compute chunk position for the element
    var chunkPos uint64
    if elem.sszType.isBasic() {
        start := *element.Index * itemLength(elem)
        chunkPos = start / ssz.BytesPerChunk
    } else {
        chunkPos = *element.Index
    }
    innerChunkCount, err := getChunkCount(fieldSsz)
    if err != nil {
        return nil, 0, fmt.Errorf("chunk count error: %w", err)
    }
    // root = root * base_index * pow2ceil(chunk_count(container)) + fieldPos
    listIndex := parentIndex*listBaseIndex*nextPowerOfTwo(innerChunkCount) + chunkPos
    currentInfo := elem

    return currentInfo, listIndex, nil
}

// calculateVectorGeneralizedIndex calculates the generalized index for a vector element.
// Returns:
// - its descendant SSZInfo (vector element)
// - its generalized index.
func calculateVectorGeneralizedIndex(fieldSsz *SszInfo, element PathElement, parentIndex uint64) (*SszInfo, uint64, error) {
    vi, err := fieldSsz.VectorInfo()
    if err != nil {
        return nil, 0, fmt.Errorf("vector info error: %w", err)
    }
    elem, err := vi.Element()
    if err != nil {
        return nil, 0, fmt.Errorf("vector element error: %w", err)
    }
    if *element.Index >= vi.Length() {
        return nil, 0, fmt.Errorf("index %d out of bounds for vector with length %d", *element.Index, vi.Length())
    }
    var chunkPos uint64
    if elem.sszType.isBasic() {
        start := *element.Index * itemLength(elem)
        chunkPos = start / ssz.BytesPerChunk
    } else {
        chunkPos = *element.Index
    }
    innerChunkCount, err := getChunkCount(fieldSsz)
    if err != nil {
        return nil, 0, fmt.Errorf("chunk count error: %w", err)
    }
    vectorIndex := parentIndex*nextPowerOfTwo(innerChunkCount) + chunkPos

    currentInfo := elem
    return currentInfo, vectorIndex, nil
}

// calculateBitlistGeneralizedIndex calculates the generalized index for a bitlist element.
// Returns:
// - its descendant SSZInfo (bitlist element i.e. a boolean)
// - its generalized index.
func calculateBitlistGeneralizedIndex(fieldSsz *SszInfo, element PathElement, parentIndex uint64) (*SszInfo, uint64, error) {
    // Bits packed into 256-bit chunks; select the chunk containing the bit
    chunkPos := *element.Index / ssz.BitsPerChunk
    innerChunkCount, err := getChunkCount(fieldSsz)
    if err != nil {
        return nil, 0, fmt.Errorf("chunk count error: %w", err)
    }
    bitlistIndex := parentIndex*listBaseIndex*nextPowerOfTwo(innerChunkCount) + chunkPos

    // Bits element is not further descendable; set to basic to guard further steps
    currentInfo := &SszInfo{sszType: Boolean}
    return currentInfo, bitlistIndex, nil
}

// calculateBitvectorGeneralizedIndex calculates the generalized index for a bitvector element.
// Returns:
// - its descendant SSZInfo (bitvector element i.e. a boolean)
// - its generalized index.
func calculateBitvectorGeneralizedIndex(fieldSsz *SszInfo, element PathElement, parentIndex uint64) (*SszInfo, uint64, error) {
    chunkPos := *element.Index / ssz.BitsPerChunk
    innerChunkCount, err := getChunkCount(fieldSsz)
    if err != nil {
        return nil, 0, fmt.Errorf("chunk count error: %w", err)
    }
    bitvectorIndex := parentIndex*nextPowerOfTwo(innerChunkCount) + chunkPos

    // Bits element is not further descendable; set to basic to guard further steps
    currentInfo := &SszInfo{sszType: Boolean}
    return currentInfo, bitvectorIndex, nil
}

// Helper functions from SSZ spec

// itemLength calculates the byte length of an SSZ item based on its type information.
// For basic SSZ types (uint8, uint16, uint32, uint64, bool, etc.), it returns the actual
// size of the type in bytes. For compound types (containers, lists, vectors), it returns
// BytesPerChunk which represents the standard SSZ chunk size (32 bytes) used for
// Merkle tree operations in the SSZ serialization format.
func itemLength(info *SszInfo) uint64 {
    if info.sszType.isBasic() {
        return info.Size()
    }
    return ssz.BytesPerChunk
}

// nextPowerOfTwo computes the next power of two greater than or equal to v.
func nextPowerOfTwo(v uint64) uint64 {
    v--
    v |= v >> 1
    v |= v >> 2
    v |= v >> 4
    v |= v >> 8
    v |= v >> 16
    v++
    return uint64(v)
}

// getChunkCount returns the number of chunks for the given SSZInfo (equivalent to chunk_count in the spec)
func getChunkCount(info *SszInfo) (uint64, error) {
    switch info.sszType {
    case Uint8, Uint16, Uint32, Uint64, Boolean:
        return 1, nil
    case Container:
        containerInfo, err := info.ContainerInfo()
        if err != nil {
            return 0, err
        }
        return uint64(len(containerInfo.fields)), nil
    case List:
        listInfo, err := info.ListInfo()
        if err != nil {
            return 0, err
        }
        elementInfo, err := listInfo.Element()
        if err != nil {
            return 0, err
        }
        elemLength := itemLength(elementInfo)
        return (listInfo.Limit()*elemLength + 31) / ssz.BytesPerChunk, nil
    case Vector:
        vectorInfo, err := info.VectorInfo()
        if err != nil {
            return 0, err
        }
        elementInfo, err := vectorInfo.Element()
        if err != nil {
            return 0, err
        }
        elemLength := itemLength(elementInfo)
        return (vectorInfo.Length()*elemLength + 31) / ssz.BytesPerChunk, nil
    case Bitlist:
        bitlistInfo, err := info.BitlistInfo()
        if err != nil {
            return 0, err
        }
        return (bitlistInfo.Limit() + 255) / ssz.BitsPerChunk, nil // Bits are packed into 256-bit chunks
    case Bitvector:
        bitvectorInfo, err := info.BitvectorInfo()
        if err != nil {
            return 0, err
        }
        return (bitvectorInfo.Length() + 255) / ssz.BitsPerChunk, nil // Bits are packed into 256-bit chunks
    default:
        return 0, errors.New("unsupported SSZ type for chunk count calculation")
    }
}

@@ -1,370 +0,0 @@
|
||||
package query_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
|
||||
sszquerypb "github.com/OffchainLabs/prysm/v6/proto/ssz_query/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestGetIndicesFromPath_FixedNestedContainer(t *testing.T) {
|
||||
fixedNestedContainer := &sszquerypb.FixedNestedContainer{}
|
||||
|
||||
info, err := query.AnalyzeObject(fixedNestedContainer)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info, "Expected non-nil SSZ info")
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
path string
|
||||
expectedIndex uint64
|
||||
expectError bool
|
||||
errorMessage string
|
||||
}{
|
||||
{
|
||||
name: "Value1 field",
|
||||
path: ".value1",
|
||||
expectedIndex: 2,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Value3 field",
|
||||
path: ".value3",
|
||||
expectError: true,
|
||||
errorMessage: "field value3 not found",
|
||||
},
|
||||
{
|
||||
name: "Basic field cannot descend",
|
||||
path: "value1.value1",
|
||||
expectError: true,
|
||||
errorMessage: "indexing requires a container field step first, got Uint64",
|
||||
},
|
||||
{
|
||||
name: "Indexing without container step",
|
||||
path: "value2.value2[0]",
|
||||
expectError: true,
|
||||
errorMessage: "indexing requires a container field step first",
|
||||
},
|
||||
{
|
||||
name: "Value2 field",
|
||||
path: "value2",
|
||||
expectedIndex: 3,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Value2 -> element[0]",
|
||||
path: "value2[0]",
|
||||
expectedIndex: 3,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Value2 -> element[31]",
|
||||
path: "value2[31]",
|
||||
expectedIndex: 3,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Empty path error",
|
||||
path: "",
|
||||
expectError: true,
|
||||
errorMessage: "empty path",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
provingFields, err := query.ParsePath(tc.path)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualIndex, err := query.GetGeneralizedIndexFromPath(info, provingFields)
|
||||
if tc.expectError {
|
||||
require.NotNil(t, err)
|
||||
if tc.errorMessage != "" {
|
||||
if !strings.Contains(err.Error(), tc.errorMessage) {
|
||||
t.Errorf("Expected error message to contain '%s', but got: %s", tc.errorMessage, err.Error())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedIndex, actualIndex, "Generalized index mismatch for path: %s", tc.path)
|
||||
t.Logf("Path: %s -> Generalized Index: %v", tc.path, actualIndex)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIndicesFromPath_VariableTestContainer(t *testing.T) {
|
||||
testSpec := &sszquerypb.VariableTestContainer{}
|
||||
info, err := query.AnalyzeObject(testSpec)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info, "Expected non-nil SSZ info")
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
path string
|
||||
expectedIndex uint64
|
||||
expectError bool
|
||||
errorMessage string
|
||||
}{
|
||||
{
|
||||
name: "leading_field",
|
||||
path: "leading_field",
|
||||
expectedIndex: 16,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "field_list_uint64",
|
||||
path: "field_list_uint64",
|
||||
expectedIndex: 17,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "len(field_list_uint64)",
|
||||
path: "len(field_list_uint64)",
|
||||
expectedIndex: 35,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "field_list_uint64[0]",
|
||||
path: "field_list_uint64[0]",
|
||||
expectedIndex: 17408,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "field_list_uint64[2047]",
|
||||
path: "field_list_uint64[2047]",
|
||||
expectedIndex: 17919,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "bitlist_field",
|
||||
path: "bitlist_field",
|
||||
expectedIndex: 22,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "bitlist_field[0]",
|
||||
path: "bitlist_field[0]",
|
||||
expectedIndex: 352,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "bitlist_field[1]",
|
||||
path: "bitlist_field[1]",
|
||||
expectedIndex: 352,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "len(bitlist_field)",
|
||||
path: "len(bitlist_field)",
|
||||
expectedIndex: 45,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "len(trailing_field)",
|
||||
path: "len(trailing_field)",
|
||||
expectError: true,
|
||||
errorMessage: "len() is only supported for List and Bitlist types, got Vector",
|
||||
},
|
||||
{
|
||||
name: "field_list_container[0]",
|
||||
path: "field_list_container[0]",
|
||||
expectedIndex: 4608,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "nested",
|
||||
path: "nested",
|
||||
expectedIndex: 20,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "nested.field_list_uint64[10]",
|
||||
path: "nested.field_list_uint64[10]",
|
||||
expectedIndex: 5186,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "variable_container_list",
|
||||
path: "variable_container_list",
|
||||
expectedIndex: 21,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "len(variable_container_list)",
|
||||
path: "len(variable_container_list)",
|
||||
expectedIndex: 43,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "variable_container_list[0]",
|
||||
path: "variable_container_list[0]",
|
||||
expectedIndex: 672,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "variable_container_list[0].inner_1",
|
||||
path: "variable_container_list[0].inner_1",
|
||||
expectedIndex: 1344,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "variable_container_list[0].inner_1.field_list_uint64[1]",
|
||||
path: "variable_container_list[0].inner_1.field_list_uint64[1]",
|
||||
expectedIndex: 344128,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "variable_container_list[0].inner_1.len(nested_list_field[3])",
|
||||
path: "variable_container_list[0].inner_1.len(nested_list_field[3])",
|
||||
expectError: true,
|
||||
errorMessage: "length calculation error: len() is not supported for multi-dimensional arrays",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
provingFields, err := query.ParsePath(tc.path)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualIndex, err := query.GetGeneralizedIndexFromPath(info, provingFields)
|
||||
|
||||
if tc.expectError {
|
||||
require.NotNil(t, err)
|
||||
if tc.errorMessage != "" {
|
||||
if !strings.Contains(err.Error(), tc.errorMessage) {
|
||||
t.Errorf("Expected error message to contain '%s', but got: %s", tc.errorMessage, err.Error())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedIndex, actualIndex, "Generalized index mismatch for path: %s", tc.path)
|
||||
t.Logf("Path: %s -> Generalized Index: %v", tc.path, actualIndex)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIndicesFromPath_FixedTestContainer(t *testing.T) {
	testSpec := &sszquerypb.FixedTestContainer{}
	info, err := query.AnalyzeObject(testSpec)
	require.NoError(t, err)
	require.NotNil(t, info, "Expected non-nil SSZ info")

	testCases := []struct {
		name          string
		path          string
		expectedIndex uint64
		expectError   bool
		errorMessage  string
	}{
		{
			name:          "field_uint32",
			path:          "field_uint32",
			expectedIndex: 16,
			expectError:   false,
		},
		{
			name:          ".field_uint64",
			path:          ".field_uint64",
			expectedIndex: 17,
			expectError:   false,
		},
		{
			name:          "field_bool",
			path:          "field_bool",
			expectedIndex: 18,
			expectError:   false,
		},
		{
			name:          "field_bytes32",
			path:          "field_bytes32",
			expectedIndex: 19,
			expectError:   false,
		},
		{
			name:          "nested",
			path:          "nested",
			expectedIndex: 20,
			expectError:   false,
		},
		{
			name:          "vector_field",
			path:          "vector_field",
			expectedIndex: 21,
			expectError:   false,
		},
		{
			name:          "two_dimension_bytes_field",
			path:          "two_dimension_bytes_field",
			expectedIndex: 22,
			expectError:   false,
		},
		{
			name:          "bitvector64_field",
			path:          "bitvector64_field",
			expectedIndex: 23,
			expectError:   false,
		},
		{
			name:          "bitvector512_field",
			path:          "bitvector512_field",
			expectedIndex: 24,
			expectError:   false,
		},
		{
			name:          "bitvector64_field[0]",
			path:          "bitvector64_field[0]",
			expectedIndex: 23,
			expectError:   false,
		},
		{
			name:          "bitvector64_field[63]",
			path:          "bitvector64_field[63]",
			expectedIndex: 23,
			expectError:   false,
		},
		{
			name:          "bitvector512_field[0]",
			path:          "bitvector512_field[0]",
			expectedIndex: 48,
			expectError:   false,
		},
		{
			name:          "bitvector512_field[511]",
			path:          "bitvector512_field[511]",
			expectedIndex: 49,
			expectError:   false,
		},
		{
			name:          "trailing_field",
			path:          "trailing_field",
			expectedIndex: 25,
			expectError:   false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			provingFields, err := query.ParsePath(tc.path)
			require.NoError(t, err)

			actualIndex, err := query.GetGeneralizedIndexFromPath(info, provingFields)

			if tc.expectError {
				require.NotNil(t, err)
				if tc.errorMessage != "" {
					if !strings.Contains(err.Error(), tc.errorMessage) {
						t.Errorf("Expected error message to contain '%s', but got: %s", tc.errorMessage, err.Error())
					}
				}
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.expectedIndex, actualIndex, "Generalized index mismatch for path: %s", tc.path)
				t.Logf("Path: %s -> Generalized Index: %v", tc.path, actualIndex)
			}
		})
	}
}

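The expected values in the two tests above follow the standard SSZ generalized-index rule: the object root is index 1, and a child at offset o under a node g whose subtree has 2^d leaves sits at g*2^d + o. FixedTestContainer has 10 fields, so merkleization pads to 16 leaves and field i lands at 16 + i; a Bitvector[64] fits in one 32-byte chunk so its bits stay at the field node, while a Bitvector[512] spans two chunks one level down; a List mixes its length into the right child of its root, which is what the len() paths resolve to. A minimal sketch of that arithmetic (childIndex is a hypothetical helper, not part of this diff):

package main

import "fmt"

// childIndex is a hypothetical helper: the generalized index of the child at
// `offset` under a parent whose subtree has 2^depth leaves.
func childIndex(parent uint64, depth uint, offset uint64) uint64 {
	return parent<<depth + offset
}

func main() {
	fmt.Println(childIndex(1, 4, 0))  // 16: first field of a 10-field container (padded to 16 leaves)
	fmt.Println(childIndex(24, 1, 1)) // 49: second 256-bit chunk of bitvector512_field
	fmt.Println(childIndex(21, 1, 1)) // 43: length mix-in behind len(variable_container_list)
}
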
@@ -3,31 +3,23 @@ package query
import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// PathElement represents a single element in a path.
type PathElement struct {
	Length bool
	Name   string
	Name   string
	// [Optional] Index for List/Vector elements
	Index *uint64
}

var arrayIndexRegex = regexp.MustCompile(`\[\s*([^\]]+)\s*\]`)

var lengthRegex = regexp.MustCompile(`^\s*len\s*\(\s*([^)]+?)\s*\)\s*$`)

// ParsePath parses a raw path string into a slice of PathElements.
// note: field names are stored in snake case format. rawPath has to be provided in snake case.
// 1. Supports dot notation for field access (e.g., "field1.field2").
// 2. Supports array indexing using square brackets (e.g., "array_field[0]").
// 3. Supports length access using len() notation (e.g., "len(array_field)").
// 4. Handles leading dots and validates path format.
func ParsePath(rawPath string) ([]PathElement, error) {
	// We use dot notation, so we split the path by '.'.
	rawElements := strings.Split(rawPath, ".")
	if len(rawElements) == 0 {
		return nil, errors.New("empty path provided")
	}

	if rawElements[0] == "" {
		// Remove leading dot if present
@@ -40,74 +32,31 @@ func ParsePath(rawPath string) ([]PathElement, error) {
			return nil, errors.New("invalid path: consecutive dots or trailing dot")
		}

		// Processing element string
		processingField := elem
		var pathElement PathElement
		fieldName := elem
		var index *uint64

		matches := lengthRegex.FindStringSubmatch(processingField)
		// FindStringSubmatch matches a whole string like "len(field_name)" and its inner expression.
		// For a path element to be a length query, len(matches) should be 2:
		// 1. Full match: "len(field_name)"
		// 2. Inner expression: "field_name"
		if len(matches) == 2 {
			pathElement.Length = true
			// Extract the inner expression between len( and ) and continue parsing on that
			processingField = matches[1]
		}
		// Check for index notation, e.g., "field[0]"
		if strings.Contains(elem, "[") {
			parts := strings.SplitN(elem, "[", 2)
			if len(parts) != 2 {
				return nil, fmt.Errorf("invalid index notation in path element %s", elem)
			}

			// Default name is the full working string (may be updated below if it contains indices)
			pathElement.Name = processingField
			fieldName = parts[0]
			indexPart := strings.TrimSuffix(parts[1], "]")
			if indexPart == "" {
				return nil, errors.New("index cannot be empty")
			}

			if strings.Contains(processingField, "[") {
				// Split into field and indices, e.g., "array[0][1]" -> name:"array", indices:{0,1}
				pathElement.Name = extractFieldName(processingField)
				indices, err := extractArrayIndices(processingField)
			indexValue, err := strconv.ParseUint(indexPart, 10, 64)
			if err != nil {
				return nil, err
				return nil, fmt.Errorf("invalid index in path element %s: %w", elem, err)
			}
			// Although extractArrayIndices supports multiple indices,
			// only a single index is supported per PathElement, e.g., "transactions[0]" is valid
			// while "transactions[0][0]" is rejected explicitly.
			if len(indices) != 1 {
				return nil, fmt.Errorf("multiple indices not supported in token %s", processingField)
			}
			pathElement.Index = &indices[0]

			index = &indexValue
		}

		path = append(path, pathElement)
		path = append(path, PathElement{Name: fieldName, Index: index})
	}

	return path, nil
}

// extractFieldName extracts the field name from a path element name (removes array indices)
// For example: "field_name[5]" returns "field_name"
func extractFieldName(name string) string {
	if idx := strings.Index(name, "["); idx != -1 {
		return name[:idx]
	}
	return name
}

// extractArrayIndices returns every bracketed, non-negative index in the name,
// e.g. "array[0][1]" -> []uint64{0, 1}. Errors if none are found or if any index is invalid.
func extractArrayIndices(name string) ([]uint64, error) {
	// Match all bracketed content, then we'll parse as unsigned to catch negatives explicitly
	matches := arrayIndexRegex.FindAllStringSubmatch(name, -1)

	if len(matches) == 0 {
		return nil, errors.New("no array indices found")
	}

	indices := make([]uint64, 0, len(matches))
	for _, m := range matches {
		raw := strings.TrimSpace(m[1])
		idx, err := strconv.ParseUint(raw, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid array index: %w", err)
		}
		indices = append(indices, idx)
	}
	return indices, nil
}

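For reference, the bracket-index handling in the simpler ParsePath variant above boils down to a split-and-parse on the first '[': everything before it is the field name, and the bracketed remainder (up to the closing ']') must parse as a base-10 uint64. A standalone sketch under that assumption (parseElement is a hypothetical name, not the package's API):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseElement splits one dot-separated path token into a field name and an
// optional numeric index, mirroring the SplitN/TrimSuffix/ParseUint approach.
func parseElement(elem string) (string, *uint64, error) {
	if !strings.Contains(elem, "[") {
		return elem, nil, nil
	}
	parts := strings.SplitN(elem, "[", 2)
	raw := strings.TrimSuffix(parts[1], "]")
	v, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return "", nil, fmt.Errorf("invalid index in %q: %w", elem, err)
	}
	return parts[0], &v, nil
}

func main() {
	name, idx, err := parseElement("transactions[7]")
	fmt.Println(name, *idx, err) // transactions 7 <nil>
}
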
@@ -7,9 +7,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

// Helper to get pointer to uint64
func u64(v uint64) *uint64 { return &v }

func TestParsePath(t *testing.T) {
	tests := []struct {
		name string
@@ -37,177 +34,6 @@ func TestParsePath(t *testing.T) {
			},
			wantErr: false,
		},
		{
			name: "simple length path with length field",
			path: "data.target.len(root)",
			expected: []query.PathElement{
				{Name: "data"},
				{Name: "target"},
				{Name: "root", Length: true},
			},
			wantErr: false,
		},
		{
			name:     "len with top-level identifier",
			path:     "len(data)",
			expected: []query.PathElement{{Name: "data", Length: true}},
			wantErr:  false,
		},
		{
			name: "length with messy whitespace",
			path: "data.target. \tlen ( root ) ",
			expected: []query.PathElement{
				{Name: "data"},
				{Name: "target"},
				{Name: "root", Length: true},
			},
			wantErr: false,
		},
		{
			name: "len with numeric index inside argument",
			path: "data.len(a[10])",
			expected: []query.PathElement{
				{Name: "data"},
				{Name: "a", Length: true, Index: u64(10)},
			},
			wantErr: false,
		},
		{
			name:     "array index with spaces",
			path:     "arr[ 42 ]",
			expected: []query.PathElement{{Name: "arr", Index: u64(42)}},
			wantErr:  false,
		},
		{
			name:     "array leading zeros",
			path:     "arr[001]",
			expected: []query.PathElement{{Name: "arr", Index: u64(1)}},
			wantErr:  false,
		},
		{
			name:     "array max uint64",
			path:     "arr[18446744073709551615]",
			expected: []query.PathElement{{Name: "arr", Index: u64(18446744073709551615)}},
			wantErr:  false,
		},
		{
			name:     "len with dotted path inside - no input validation - reverts at a later stage",
			path:     "len(data.target.root)",
			expected: []query.PathElement{{Name: "len(data", Length: false}, {Name: "target", Length: false}, {Name: "root)", Length: false}},
			wantErr:  false,
		},
		{
			name:     "len with dotted path then more - no input validation - reverts at a later stage",
			path:     "len(data.target.root).foo",
			expected: []query.PathElement{{Name: "len(data", Length: false}, {Name: "target", Length: false}, {Name: "root)", Length: false}, {Name: "foo", Length: false}},
			wantErr:  false,
		},
		{
			name:     "len without closing paren - no input validation - reverts at a later stage",
			path:     "len(root",
			expected: []query.PathElement{{Name: "len(root"}},
			wantErr:  false,
		},
		{
			name:     "len with extra closing paren - no input validation - reverts at a later stage",
			path:     "len(root))",
			expected: []query.PathElement{{Name: "len(root))"}},
			wantErr:  false,
		},
		{
			name:     "empty len argument - no input validation - reverts at a later stage",
			path:     "len()",
			expected: []query.PathElement{{Name: "len()"}},
			wantErr:  false,
		},
		{
			name:     "len with comma-separated args - no input validation - reverts at a later stage",
			path:     "len(a,b)",
			expected: []query.PathElement{{Name: "a,b", Length: true}},
			wantErr:  false,
		},
		{
			name: "len call followed by index (outer) - no input validation - reverts at a later stage",
			path: "data.len(root)[0]",
			expected: []query.PathElement{
				{Name: "data"},
				{Name: "len(root)", Index: u64(0)},
			},
			wantErr: false,
		},
		{
			name:    "cannot provide consecutive dots in raw path",
			path:    "data..target.root",
			wantErr: true,
		},
		{
			name:    "cannot provide a negative index in array path",
			path:    ".data.target.root[-1]",
			wantErr: true,
		},
		{
			name:    "invalid index in array path",
			path:    ".data.target.root[a]",
			wantErr: true,
		},
		{
			name:    "multidimensional array index in path",
			path:    ".data.target.root[0][1]",
			wantErr: true,
		},
		{
			name:     "leading double dot",
			path:     "..data",
			expected: nil,
			wantErr:  true,
		},
		{
			name:     "trailing dot",
			path:     "data.target.",
			expected: nil,
			wantErr:  true,
		},
		{
			name:    "len with inner bracket non-numeric index",
			path:    "data.len(a[b])",
			wantErr: true,
		},
		{
			name:    "array empty index",
			path:    "arr[]",
			wantErr: true,
		},
		{
			name:    "array hex index",
			path:    "arr[0x10]",
			wantErr: true,
		},
		{
			name:    "array missing closing bracket",
			path:    "arr[12",
			wantErr: true,
		},
		{
			name:    "array plus sign index",
			path:    "arr[+3]",
			wantErr: true,
		},
		{
			name:    "array unicode digits",
			path:    "arr[１２]",
			wantErr: true,
		},
		{
			name:    "array overflow uint64",
			path:    "arr[18446744073709551616]",
			wantErr: true,
		},
		{
			name:     "array index then suffix",
			path:     "field[1]suffix",
			expected: []query.PathElement{{Name: "field", Index: u64(1)}},
			wantErr:  false,
		},
	}

	for _, tt := range tests {
@@ -215,7 +41,7 @@ func TestParsePath(t *testing.T) {
			parsedPath, err := query.ParsePath(tt.path)

			if tt.wantErr {
				require.NotNil(t, err, "Expected error did not occur")
				require.NotNil(t, err, "Expected error but got none")
				return
			}


@@ -57,8 +57,3 @@ func (t SSZType) String() string {
		return fmt.Sprintf("Unknown(%d)", t)
	}
}

// isBasic returns true if the SSZType is a basic type.
func (t SSZType) isBasic() bool {
	return t == Uint8 || t == Uint16 || t == Uint32 || t == Uint64 || t == Boolean
}

go.mod (2 changes)

@@ -177,7 +177,7 @@ require (
	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
	github.com/libp2p/go-msgio v0.3.0 // indirect
	github.com/libp2p/go-nat v0.2.0 // indirect
	github.com/libp2p/go-netroute v0.3.0 // indirect
	github.com/libp2p/go-netroute v0.2.2 // indirect
	github.com/libp2p/go-reuseport v0.4.0 // indirect
	github.com/libp2p/go-yamux/v4 v4.0.2 // indirect
	github.com/lunixbochs/vtclean v1.0.0 // indirect

go.sum (4 changes)

@@ -599,8 +599,8 @@ github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.2 h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU=

@@ -1,7 +1,6 @@
package util

import (
	"slices"
	"testing"
	"time"

@@ -67,7 +66,13 @@ func assertNoHooks(t *testing.T, logger *logrus.Logger) {
func assertRegistered(t *testing.T, logger *logrus.Logger, hook ComparableHook) {
	for _, lvl := range hook.Levels() {
		registered := logger.Hooks[lvl]
		found := slices.ContainsFunc(registered, hook.Equal)
		found := false
		for _, h := range registered {
			if hook.Equal(h) {
				found = true
				break
			}
		}
		require.Equal(t, true, found, "Expected hook %v to be registered at level %s, but it was not", hook, lvl.String())
	}
}

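Both sides of the assertRegistered hunk compute the same predicate: slices.ContainsFunc (in the standard library since Go 1.21) is the one-line equivalent of the expanded loop. A minimal demonstration with a plain predicate:

package main

import (
	"fmt"
	"slices"
)

func main() {
	registered := []int{2, 4, 7}
	isSeven := func(n int) bool { return n == 7 }

	// One-liner form.
	found := slices.ContainsFunc(registered, isSeven)

	// Expanded loop form, as on the other side of the diff.
	foundLoop := false
	for _, n := range registered {
		if isSeven(n) {
			foundLoop = true
			break
		}
	}
	fmt.Println(found, foundLoop) // true true
}
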
@@ -6,7 +6,6 @@ import (
	"errors"
	"go/ast"
	"go/token"
	"slices"
	"strconv"
	"strings"
	"unicode"
@@ -179,7 +178,12 @@ func isLoggingCall(call *ast.CallExpr, logFunctions []string, aliases map[string
// isCommonLogPackage checks for common logging package names
func isCommonLogPackage(pkg string) bool {
	common := []string{"log", "logrus", "zerolog", "zap", "glog", "klog"}
	return slices.Contains(common, pkg)
	for _, c := range common {
		if pkg == c {
			return true
		}
	}
	return false
}

// isFormatFunction checks if this is a format function (ending with 'f')
@@ -270,8 +274,10 @@ func isAcceptableStart(firstRune rune, s string) bool {

	// Special characters that are OK to start with
	acceptableChars := []rune{'%', '$', '/', '\\', '[', '(', '{', '"', '\'', '`', '-'}
	if slices.Contains(acceptableChars, firstRune) {
		return true
	for _, char := range acceptableChars {
		if firstRune == char {
			return true
		}
	}

	// URLs/paths are OK

@@ -10,7 +10,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
)

@@ -68,8 +67,7 @@ func (c *beaconApiValidatorClient) proposeAttestationElectra(ctx context.Context
	if err != nil {
		return nil, err
	}
	consensusVersion := version.String(slots.ToForkVersion(attestation.Data.Slot))
	headers := map[string]string{"Eth-Consensus-Version": consensusVersion}
	headers := map[string]string{"Eth-Consensus-Version": version.String(attestation.Version())}
	if err = c.jsonRestHandler.Post(
		ctx,
		"/eth/v2/beacon/pool/attestations",

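The substantive change in this file is where the Eth-Consensus-Version header comes from: one side recomputes the fork from the attestation's slot via slots.ToForkVersion, the other reads the version already carried by the attestation object. A toy sketch of the two derivations (the types and the epoch-1 fork boundary here are illustrative assumptions, not prysm code); near a fork boundary the two sources can presumably disagree, which is the point of settling on a single canonical one:

package main

import "fmt"

// attestation is a stand-in type: a slot plus the version name already
// attached to the object when it was built.
type attestation struct {
	slot    uint64
	version string
}

// forkAtSlot is a stand-in for a slot-to-fork lookup; assume the next fork
// activates at epoch 1 (slot 32) for illustration.
func forkAtSlot(slot uint64) string {
	if slot >= 32 {
		return "fulu"
	}
	return "electra"
}

func main() {
	att := attestation{slot: 75, version: "electra"}

	oldHeader := map[string]string{"Eth-Consensus-Version": forkAtSlot(att.slot)}
	newHeader := map[string]string{"Eth-Consensus-Version": att.version}
	fmt.Println(oldHeader, newHeader) // the two derivations need not agree
}
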
@@ -8,14 +8,11 @@ import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/OffchainLabs/prysm/v6/validator/client/beacon-api/mock"
	testhelpers "github.com/OffchainLabs/prysm/v6/validator/client/beacon-api/test-helpers"
	"go.uber.org/mock/gomock"
@@ -217,58 +214,36 @@ func TestProposeAttestationFallBack(t *testing.T) {
}

func TestProposeAttestationElectra(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().ElectraForkEpoch = 0
	params.BeaconConfig().FuluForkEpoch = 1

	buildSingleAttestation := func(slot primitives.Slot) *ethpb.SingleAttestation {
		targetEpoch := slots.ToEpoch(slot)
		sourceEpoch := targetEpoch
		if targetEpoch > 0 {
			sourceEpoch = targetEpoch - 1
		}
		return &ethpb.SingleAttestation{
			AttesterIndex: 74,
			Data: &ethpb.AttestationData{
				Slot:            slot,
				CommitteeIndex:  76,
				BeaconBlockRoot: testhelpers.FillByteSlice(32, 38),
				Source: &ethpb.Checkpoint{
					Epoch: sourceEpoch,
					Root:  testhelpers.FillByteSlice(32, 79),
				},
				Target: &ethpb.Checkpoint{
					Epoch: targetEpoch,
					Root:  testhelpers.FillByteSlice(32, 81),
				},
	attestation := &ethpb.SingleAttestation{
		AttesterIndex: 74,
		Data: &ethpb.AttestationData{
			Slot:            75,
			CommitteeIndex:  76,
			BeaconBlockRoot: testhelpers.FillByteSlice(32, 38),
			Source: &ethpb.Checkpoint{
				Epoch: 78,
				Root:  testhelpers.FillByteSlice(32, 79),
			},
			Signature:   testhelpers.FillByteSlice(96, 82),
			CommitteeId: 83,
		}
			Target: &ethpb.Checkpoint{
				Epoch: 80,
				Root:  testhelpers.FillByteSlice(32, 81),
			},
		},
		Signature:   testhelpers.FillByteSlice(96, 82),
		CommitteeId: 83,
	}

	attestationElectra := buildSingleAttestation(0)
	attestationFulu := buildSingleAttestation(params.BeaconConfig().SlotsPerEpoch)

	tests := []struct {
		name                     string
		attestation              *ethpb.SingleAttestation
		expectedConsensusVersion string
		expectedErrorMessage     string
		endpointError            error
		endpointCall             int
		name                 string
		attestation          *ethpb.SingleAttestation
		expectedErrorMessage string
		endpointError        error
		endpointCall         int
	}{
		{
			name:                     "valid electra",
			attestation:              attestationElectra,
			expectedConsensusVersion: version.String(slots.ToForkVersion(attestationElectra.GetData().GetSlot())),
			endpointCall:             1,
		},
		{
			name:                     "valid fulu consensus version",
			attestation:              attestationFulu,
			expectedConsensusVersion: version.String(slots.ToForkVersion(attestationFulu.GetData().GetSlot())),
			endpointCall:             1,
			name:         "valid",
			attestation:  attestation,
			endpointCall: 1,
		},
		{
			name: "nil attestation",
@@ -308,11 +283,8 @@ func TestProposeAttestationElectra(t *testing.T) {
			expectedErrorMessage: "attestation's target can't be nil",
		},
		{
			name:                     "bad request",
			attestation:              attestationElectra,
			expectedConsensusVersion: version.String(
				slots.ToForkVersion(attestationElectra.GetData().GetSlot()),
			),
			name:                 "bad request",
			attestation:          attestation,
			expectedErrorMessage: "bad request",
			endpointError:        errors.New("bad request"),
			endpointCall:         1,
@@ -332,14 +304,11 @@ func TestProposeAttestationElectra(t *testing.T) {
			}

			ctx := t.Context()
			headerMatcher := gomock.Any()
			if test.expectedConsensusVersion != "" {
				headerMatcher = gomock.Eq(map[string]string{"Eth-Consensus-Version": test.expectedConsensusVersion})
			}
			headers := map[string]string{"Eth-Consensus-Version": version.String(test.attestation.Version())}
			jsonRestHandler.EXPECT().Post(
				gomock.Any(),
				"/eth/v2/beacon/pool/attestations",
				headerMatcher,
				headers,
				bytes.NewBuffer(marshalledAttestations),
				nil,
			).Return(
@@ -356,7 +325,7 @@ func TestProposeAttestationElectra(t *testing.T) {
			require.NoError(t, err)
			require.NotNil(t, proposeResponse)

			expectedAttestationDataRoot, err := test.attestation.Data.HashTreeRoot()
			expectedAttestationDataRoot, err := attestation.Data.HashTreeRoot()
			require.NoError(t, err)

			// Make sure that the attestation data root is set

@@ -10,7 +10,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
)

@@ -55,9 +54,7 @@ func (c *beaconApiValidatorClient) submitSignedAggregateSelectionProofElectra(ct
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal SignedAggregateAttestationAndProofElectra")
	}
	dataSlot := in.SignedAggregateAndProof.Message.Aggregate.Data.Slot
	consensusVersion := version.String(slots.ToForkVersion(dataSlot))
	headers := map[string]string{"Eth-Consensus-Version": consensusVersion}
	headers := map[string]string{"Eth-Consensus-Version": version.String(in.SignedAggregateAndProof.Version())}
	if err = c.jsonRestHandler.Post(ctx, "/eth/v2/validator/aggregate_and_proofs", headers, bytes.NewBuffer(body), nil); err != nil {
		return nil, err
	}

@@ -7,13 +7,11 @@ import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/OffchainLabs/prysm/v6/validator/client/beacon-api/mock"
	testhelpers "github.com/OffchainLabs/prysm/v6/validator/client/beacon-api/test-helpers"
	"github.com/pkg/errors"
@@ -125,10 +123,6 @@ func TestSubmitSignedAggregateSelectionProof_Fallback(t *testing.T) {
}

func TestSubmitSignedAggregateSelectionProofElectra_Valid(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().ElectraForkEpoch = 0
	params.BeaconConfig().FuluForkEpoch = 100

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

@@ -137,8 +131,7 @@ func TestSubmitSignedAggregateSelectionProofElectra_Valid(t *testing.T) {
	require.NoError(t, err)

	ctx := t.Context()
	expectedVersion := version.String(slots.ToForkVersion(signedAggregateAndProofElectra.Message.Aggregate.Data.Slot))
	headers := map[string]string{"Eth-Consensus-Version": expectedVersion}
	headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProofElectra.Message.Version())}
	jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
	jsonRestHandler.EXPECT().Post(
		gomock.Any(),
@@ -162,10 +155,6 @@ func TestSubmitSignedAggregateSelectionProofElectra_Valid(t *testing.T) {
}

func TestSubmitSignedAggregateSelectionProofElectra_BadRequest(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().ElectraForkEpoch = 0
	params.BeaconConfig().FuluForkEpoch = 100

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

@@ -174,8 +163,7 @@ func TestSubmitSignedAggregateSelectionProofElectra_BadRequest(t *testing.T) {
	require.NoError(t, err)

	ctx := t.Context()
	expectedVersion := version.String(slots.ToForkVersion(signedAggregateAndProofElectra.Message.Aggregate.Data.Slot))
	headers := map[string]string{"Eth-Consensus-Version": expectedVersion}
	headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProofElectra.Message.Version())}
	jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
	jsonRestHandler.EXPECT().Post(
		gomock.Any(),
@@ -194,43 +182,6 @@ func TestSubmitSignedAggregateSelectionProofElectra_BadRequest(t *testing.T) {
	assert.ErrorContains(t, "bad request", err)
}

func TestSubmitSignedAggregateSelectionProofElectra_FuluVersion(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().ElectraForkEpoch = 0
	params.BeaconConfig().FuluForkEpoch = 1

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	signedAggregateAndProofElectra := generateSignedAggregateAndProofElectraJson()
	marshalledSignedAggregateSignedAndProofElectra, err := json.Marshal([]*structs.SignedAggregateAttestationAndProofElectra{jsonifySignedAggregateAndProofElectra(signedAggregateAndProofElectra)})
	require.NoError(t, err)

	ctx := t.Context()
	expectedVersion := version.String(slots.ToForkVersion(signedAggregateAndProofElectra.Message.Aggregate.Data.Slot))
	headers := map[string]string{"Eth-Consensus-Version": expectedVersion}
	jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
	jsonRestHandler.EXPECT().Post(
		gomock.Any(),
		"/eth/v2/validator/aggregate_and_proofs",
		headers,
		bytes.NewBuffer(marshalledSignedAggregateSignedAndProofElectra),
		nil,
	).Return(
		nil,
	).Times(1)

	attestationDataRoot, err := signedAggregateAndProofElectra.Message.Aggregate.Data.HashTreeRoot()
	require.NoError(t, err)

	validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
	resp, err := validatorClient.submitSignedAggregateSelectionProofElectra(ctx, &ethpb.SignedAggregateSubmitElectraRequest{
		SignedAggregateAndProof: signedAggregateAndProofElectra,
	})
	require.NoError(t, err)
	assert.DeepEqual(t, attestationDataRoot[:], resp.AttestationDataRoot)
}

func generateSignedAggregateAndProofJson() *ethpb.SignedAggregateAttestationAndProof {
	return &ethpb.SignedAggregateAttestationAndProof{
		Message: &ethpb.AggregateAttestationAndProof{