Compare commits

...

22 Commits

Author SHA1 Message Date
terence
38955fd08c Optimize pending attestation processing by adding batching (#15801)
* Optimize pending attestation processing by adding batching

* Update beacon-chain/sync/pending_attestations_queue.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/sync/pending_attestations_queue.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Add root for debug

* Change it to map

* Don't need receiver

* Use two slices

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-10-08 05:18:13 +00:00
kasey
71f05b597f Use NetworkSchedule config to determine max blobs at epoch (#15714)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-10-08 04:02:05 +00:00
kasey
0d742c6f88 make registerSubscribers idempotent (#15779)
* make registerSubscribers idempotent

* clean up debugging changes

* test fix

* rm unused var

* sobbing noises

* naming feedback and separate test for digestActionDone

* gazelle

* manu's feedback

* refactor to enable immediate sub after init sync

* preston comment re panic causing db corruption risk

* ensure we check that we're 1 epoch past the fork

* manu feedback

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-10-08 01:09:22 +00:00
Manu NALEPA
06b5409ff0 When looking for peers, skip peers with error instead of aborting the whole function (#15815)
* `findPeersWithSubnets`: If the `filter` function returns an error for a given peer, log an error and skip the peer instead of aborting the whole function.

* `computeIndicesByRootByPeer`: If the loop returns an error for a given peer, log an error and skip the peer instead of aborting the whole function.

* Add changelog.

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-10-07 19:13:08 +00:00
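A minimal sketch of the skip-and-continue pattern described in this commit, with hypothetical names (not Prysm's actual API): instead of returning on the first peer for which the filter fails, the error is logged and the loop moves on.

```
package sketch

import "log"

// filterPeers applies filter to each candidate peer and keeps the ones that
// pass. If filter returns an error for a peer, that peer is logged and
// skipped instead of aborting the whole function.
func filterPeers(peers []string, filter func(peer string) (bool, error)) []string {
	kept := make([]string, 0, len(peers))
	for _, p := range peers {
		ok, err := filter(p)
		if err != nil {
			log.Printf("skipping peer %s: %v", p, err)
			continue
		}
		if ok {
			kept = append(kept, p)
		}
	}
	return kept
}
```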
Sahil Sojitra
9805e90d73 chore: refactor to use builtin max/min (#15817)
* passed the tests with inbuilt max func

* tested min changes

* fix bazel files

* added changelog
2025-10-07 19:02:47 +00:00
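The diffs further down show the pattern this commit applies across the codebase: calls to the project's `math.Max`/`math.Min` helpers are replaced with Go 1.21's builtin `max`/`min`, and the now-unused `//math:go_default_library` dependencies are dropped. A tiny self-contained illustration (Go 1.21+):

```
package main

import "fmt"

func main() {
	a, b := uint64(7), uint64(3)
	// Builtins since Go 1.21: no helper package or extra import needed.
	fmt.Println(max(a, b)) // 7
	fmt.Println(min(a, b)) // 3
}
```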
Manu NALEPA
537f3cb863 StatusV2: Send correct earliest available slot. (#15818)
* `buildStatusFromStream`: Use parent context.

* Status tests: Use `t.Context` everywhere.

* `buildStatusFromStream`: Respond statusV2 only if Fulu is enabled.

Without doing so, the earliest available slot is never defined, and `s.cfg.p2p.EarliestAvailableSlot` will block until the context is canceled.

* Send our real earliest available slot (instead of `0`) when sending a Status request post-Fulu.

* Add changelog.
2025-10-07 18:45:47 +00:00
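A hedged sketch of the gating described above, with hypothetical type and method names rather than Prysm's actual ones: the v2 status (which carries the earliest available slot) is only built once Fulu is enabled, since before that the value is never defined and the lookup would block until the context is canceled.

```
package sketch

import "context"

type StatusV1 struct{}
type StatusV2 struct{ EarliestAvailableSlot uint64 }

type p2pService interface {
	EarliestAvailableSlot(ctx context.Context) (uint64, error)
}

// buildStatus responds with a StatusV2 only once the Fulu fork is enabled.
// Post-Fulu, the real earliest available slot is sent instead of 0.
func buildStatus(ctx context.Context, currentEpoch, fuluForkEpoch uint64, p p2pService) (interface{}, error) {
	if currentEpoch < fuluForkEpoch {
		return &StatusV1{}, nil
	}
	slot, err := p.EarliestAvailableSlot(ctx)
	if err != nil {
		return nil, err
	}
	return &StatusV2{EarliestAvailableSlot: slot}, nil
}
```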
Manu NALEPA
b45e87abd6 Move some logs to trace (#15816)
* Prettify logs for byRange/byRoot data column sidecar requests.

* Move byRoot/byRange data column sidecar requests from peers to TRACE level.

* Move "Peer requested blob sidecar by root not found in db" to TRACE.

* Add changelog.

* Fix Kasey's comment.

* Apply Kasey's suggestion.
2025-10-07 18:44:55 +00:00
Preston Van Loon
4c4b12cca7 Clear disconnected peers from connected_libp2p_peers (#15807)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-10-06 19:59:42 +00:00
james-prysm
aabded250f fixing "Origin block root is not set" error log (#15808)
* fixing error log

* kasey's suggestion

* kasey's feedback replacing comment
2025-10-06 15:47:27 +00:00
Manu NALEPA
4f9e56fc70 Custody Info: Waits for initialization (#15804)
* Revert "`createLocalNode`: Wait before retrying to retrieve the custody group count if not present. (#15735)"

This reverts commit 4585cdc932.

* Revert "Fix no custody info available at start (#15732)"

This reverts commit 80eba4e6dd.

* Add context to `EarliestAvailableSlot` and `CustodyGroupCount` (no functional change).

* Remove double imports.

* `EarliestAvailableSlot` and `CustodyGroupCount`: Wait for custody info to be initialized.
2025-10-06 10:55:48 +00:00
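A minimal sketch of the "wait for initialization" behavior this commit describes, using hypothetical types (the real custody manager differs): the getters take a context and block until the custody info has been initialized or the context is canceled, matching the new `EarliestAvailableSlot(ctx)`/`CustodyGroupCount(ctx)` signatures visible in the mock updates below.

```
package sketch

import "context"

// custodyInfo blocks readers until initialization has happened.
type custodyInfo struct {
	initialized           chan struct{} // closed once the fields below are set
	earliestAvailableSlot uint64
	custodyGroupCount     uint64
}

func (c *custodyInfo) EarliestAvailableSlot(ctx context.Context) (uint64, error) {
	select {
	case <-c.initialized:
		return c.earliestAvailableSlot, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func (c *custodyInfo) CustodyGroupCount(ctx context.Context) (uint64, error) {
	select {
	case <-c.initialized:
		return c.custodyGroupCount, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}
```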
james-prysm
2a86132994 removing old unused configs and hiding prysm specific configs (#15797)
* removing old unused configs and hiding configs to align with other clients

* gaz and removing unneeded from test

* fixing test

* fixing test
2025-10-03 18:32:44 +00:00
kasey
74c47e25a9 exclude unscheduled forks from the schedule (#15799)
* exclude unscheduled forks from the schedule

* update tests that relied on fulu being far future

* don't mess with the version->epoch mapping

* LastFork cheats the tests by filtering by far_future_epoch

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-10-03 15:29:27 +00:00
Bastin
28eb1a4c3c add delay before broadcasting lc objects (#15776)
* add delay before broadcasting lc objects

* address comments

* address comments

* reduce test runtime
2025-10-03 13:02:46 +00:00
terence
1f89394727 Return early if there's no block for data column sidecar (#15802) 2025-10-03 02:59:26 +00:00
kasey
bf1095c782 ignore version|digest mismatch if far future (#15798)
* ignore version/digest mismatch if far future

* bonus: this log generates a lot of noise, bump it down to trace

* unit test

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-10-02 19:57:54 +00:00
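A rough, hypothetical sketch of the rule this commit describes (not Prysm's actual fork-digest code): a version/digest mismatch against a fork whose epoch is still the far-future epoch is ignored rather than reported, since that fork is not actually scheduled.

```
package sketch

import (
	"errors"
	"math"
)

const farFutureEpoch = math.MaxUint64

var errDigestMismatch = errors.New("fork digest mismatch")

type fork struct {
	Epoch   uint64
	Version [4]byte
}

// checkDigest ignores a mismatch when the fork is unscheduled (far future).
func checkDigest(got, want [4]byte, f fork) error {
	if got == want {
		return nil
	}
	if f.Epoch == farFutureEpoch {
		return nil // unscheduled fork: do not treat the mismatch as an error
	}
	return errDigestMismatch
}
```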
Manu NALEPA
b24fe0d23a requestAndSaveMissingDataColumnSidecars: Fix log (#15794) 2025-10-02 16:38:00 +00:00
Bastin
cbe50269de Change LC p2p validation rules (#15783)
* compare incoming lc message with locally computed object

* fix logs

* add comment
2025-10-02 15:04:33 +00:00
Manu NALEPA
4ed2953fcf inclusionProofKey: Include the commitments in the key. (#15795)
* `inclusionProofKey`: Include the commitments in the key.

* Fix Potuz's comment.

* Update beacon-chain/verification/data_column.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Fix Potuz's comment.

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2025-10-02 13:17:11 +00:00
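A sketch of the cache-key idea in this commit, with a hypothetical helper rather than Prysm's actual `inclusionProofKey`: mixing the KZG commitments into the key alongside the block root means sidecars that share a root but carry different commitments no longer collide.

```
package sketch

import "crypto/sha256"

// inclusionProofKey derives a cache key from the block root and the KZG
// commitments, so the commitments are part of the key's identity.
func inclusionProofKey(blockRoot [32]byte, commitments [][]byte) [32]byte {
	h := sha256.New()
	h.Write(blockRoot[:])
	for _, c := range commitments {
		h.Write(c)
	}
	var key [32]byte
	copy(key[:], h.Sum(nil))
	return key
}
```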
Potuz
915837d059 Process pending on block (#15791)
* Process pending attestations with block insertion

* fix tests

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

* Terence's review

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-10-02 00:19:04 +00:00
Potuz
26b276660f Avoid unnecessary calls to ExitInformation() (#15764)
* Avoid unnecessary calls to ExitInformation()

ExitInformation runs a loop over the whole validator set. This is only needed
when there are slashings or exits to be processed in a block (we could cache
it, or avoid it entirely post-Electra). This PR removes the calls to this
function on the normal state transition path. h/t to @terencechain for
finding this bug.

In addition, when processing withdrawal requests and registry updates, we
kept recomputing the exit information at the same time the state was being
updated, even though the function that updates the state already takes care
of tracking and updating the right exit information. So this PR removes the
calls that recompute this exit information inside a loop. Note that this bug
has been present since before we had an `ExitInformation()` function, so I
will document it here to help the reviewer.

Our previous behavior was to do this in a loop:

```
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
```

This is a bit problematic since `ExitInformation` loops over the whole validator set to compute the exit information (and the total active balance), and then `InitiateValidatorExit` recomputes the total active balance by looping over the whole validator set again, overwriting the pointer returned by `ExitInformation`.

On the other hand, the function `InitiateValidatorExit` does mutate the state `st` itself, so each call to `ExitInformation(st)` may actually return a different pointer.

The function `ExitInformation` computes as follows:

```
	err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
		e := val.ExitEpoch()
		if e != farFutureEpoch {
			if e > exitInfo.HighestExitEpoch {
				exitInfo.HighestExitEpoch = e
				exitInfo.Churn = 1
			} else if e == exitInfo.HighestExitEpoch {
				exitInfo.Churn++
			}
```

So it simply increases the churn for each validator whose exit epoch equals the highest exit epoch.

The function `InitiateValidatorExit` mutates this pointer in the following way:

If the state is post-Electra, it completely disregards this pointer and computes the highest exit epoch and updates the churn unconditionally, so `exitInfo.HighestExitEpoch` always ends up with the right value and does not even need to be computed beforehand; we could avoid the first loop entirely. If the state is pre-Electra, then the function itself correctly updates the exit info for the next iteration.

* Only care about exits pre-Electra

* Update beacon-chain/core/transition/transition_no_verify_sig.go

Co-authored-by: terence <terence@prysmaticlabs.com>

* Radek's review

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2025-10-02 00:17:39 +00:00
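A heavily simplified sketch of the call-pattern change described above, using hypothetical types and a toy churn rule (the real spec logic is richer): the exit information is computed once with a single pass over the validator set, and the same pointer is reused inside the loop, since the exit-initiating function mutates it in place for the next iteration.

```
package sketch

import "math"

const farFutureEpoch = math.MaxUint64

// Simplified stand-ins for the real state and ExitInfo types.
type validator struct{ exitEpoch uint64 }
type beaconState struct{ validators []validator }
type exitInfo struct {
	HighestExitEpoch uint64
	Churn            uint64
}

// computeExitInformation makes a single pass over the whole validator set,
// like the ExitInformation() described in the commit message.
func computeExitInformation(st *beaconState) *exitInfo {
	info := &exitInfo{}
	for _, v := range st.validators {
		if v.exitEpoch == farFutureEpoch {
			continue
		}
		switch {
		case v.exitEpoch > info.HighestExitEpoch:
			info.HighestExitEpoch = v.exitEpoch
			info.Churn = 1
		case v.exitEpoch == info.HighestExitEpoch:
			info.Churn++
		}
	}
	return info
}

// initiateValidatorExit mutates info in place (toy churn rule: at most
// churnLimit exits per epoch), so the caller can keep reusing the pointer.
func initiateValidatorExit(st *beaconState, idx uint64, info *exitInfo, churnLimit uint64) {
	if info.Churn >= churnLimit {
		info.HighestExitEpoch++
		info.Churn = 0
	}
	info.Churn++
	st.validators[idx].exitEpoch = info.HighestExitEpoch
}

// initiateExits shows the fixed pattern: the exit info is computed once,
// outside the loop, instead of re-running the full validator-set scan on
// every iteration as the old code did.
func initiateExits(st *beaconState, indices []uint64, churnLimit uint64) {
	info := computeExitInformation(st)
	for _, idx := range indices {
		initiateValidatorExit(st, idx, info, churnLimit)
	}
}
```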
james-prysm
580509f2f4 attempting to improve duties v2 (#15784)
* attempting to improve duties v2

* removing go routine

* changelog

* unnecessary variable

* fixing test

* small optimization exiting early in the CommitteeAssignments function

* fixing small bug

* fixes performance issues with duties v2

* fixed changelog

* gofmt
2025-10-01 20:40:14 +00:00
Potuz
be144da099 fix test race conditions (#15792)
Fix race condition where svc.verifierWaiter was being set after
svc.Start() was already running, causing a nil pointer dereference.
2025-10-01 19:29:20 +00:00
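A hypothetical sketch of the race this commit fixes (the names mirror the message but the types are invented): if `svc.verifierWaiter` is only assigned after `svc.Start()` is already running in its own goroutine, `Start` can observe a nil waiter and panic; the fix is to fully construct the service before starting it.

```
package sketch

type verifierWaiter struct{ ready chan struct{} }

// Wait dereferences the receiver, so a nil waiter panics here.
func (w *verifierWaiter) Wait() { <-w.ready }

type service struct{ verifierWaiter *verifierWaiter }

func (s *service) Start() {
	s.verifierWaiter.Wait() // nil waiter => nil pointer dereference
}

// Racy (before): svc := &service{}; go svc.Start(); svc.verifierWaiter = w
// Fixed (after): assign the waiter before Start is launched.
func startService(w *verifierWaiter) *service {
	svc := &service{verifierWaiter: w}
	go svc.Start()
	return svc
}
```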
176 changed files with 2835 additions and 2083 deletions

View File

@@ -59,6 +59,7 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -170,8 +171,11 @@ func TestClient_RegisterValidator(t *testing.T) {
func TestClient_GetHeader(t *testing.T) {
ctx := t.Context()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot primitives.Slot = 23
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
expectedPath := "/eth/v1/builder/header/%d/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
expectedPath = fmt.Sprintf(expectedPath, ds)
var slot primitives.Slot = ds
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
@@ -533,7 +537,7 @@ func TestClient_GetHeader(t *testing.T) {
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseElectra{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
pro, err := epr.ToProto(100)
pro, err := epr.ToProto(es)
require.NoError(t, err)
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)

View File

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -108,7 +107,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
dep := max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
@@ -135,7 +134,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
Depth: max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],

View File

@@ -712,7 +712,7 @@ func (s *Service) areDataColumnsAvailable(
nodeID := s.cfg.P2P.NodeID()
// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}

View File

@@ -2413,6 +2413,8 @@ func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
}
func TestMissingBlobIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
expected [][]byte
@@ -2426,23 +2428,23 @@ func TestMissingBlobIndices(t *testing.T) {
},
{
name: "expected exceeds max",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
expected: fakeCommitments(maxBlobs + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},
@@ -2475,8 +2477,8 @@ func TestMissingBlobIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
require.NoError(t, bm.CreateFakeIndices(c.root, ds, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, ds)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
@@ -2904,22 +2906,21 @@ type testIsAvailableParams struct {
columnsToSave []uint64
}
func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(t.Context())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
options := append(params.options, WithDataColumnStorage(dataColumnStorage))
options := append(p.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
err := service.saveGenesisData(ctx, genesisState)
require.NoError(t, err)
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32, util.WithElectraStateSlot(fs))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
conf := util.DefaultBlockGenConfig()
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
conf.NumBlobKzgCommitments = p.blobKzgCommitmentsCount
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, fs+1)
require.NoError(t, err)
block := signedBeaconBlock.Block
@@ -2929,8 +2930,8 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
root, err := block.HashTreeRoot()
require.NoError(t, err)
dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
for _, i := range params.columnsToSave {
dataColumnsParams := make([]util.DataColumnParam, 0, len(p.columnsToSave))
for _, i := range p.columnsToSave {
dataColumnParam := util.DataColumnParam{
Index: i,
Slot: block.Slot,
@@ -2954,8 +2955,12 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
}
func TestIsDataAvailable(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch, cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch, cfg.ElectraForkEpoch, cfg.FuluForkEpoch = 0, 0, 0, 0, 0, 0
params.OverrideBeaconConfig(cfg)
t.Run("Fulu - out of retention window", func(t *testing.T) {
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
params := testIsAvailableParams{}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
@@ -2972,7 +2977,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)

View File

@@ -562,8 +562,9 @@ func TestNotifyIndex(t *testing.T) {
var root [32]byte
copy(root[:], "exampleRoot")
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Test notifying a new index
bn.notifyIndex(root, 1, 1)
bn.notifyIndex(root, 1, ds)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}
@@ -580,7 +581,7 @@ func TestNotifyIndex(t *testing.T) {
}
// Test notifying a new index again
bn.notifyIndex(root, 2, 1)
bn.notifyIndex(root, 2, ds)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}

View File

@@ -106,14 +106,14 @@ type mockCustodyManager struct {
custodyGroupCount uint64
}
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
func (dch *mockCustodyManager) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.earliestAvailableSlot, nil
}
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
func (dch *mockCustodyManager) CustodyGroupCount(context.Context) (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()

View File

@@ -5,7 +5,6 @@ package cache
import (
"context"
"errors"
"math"
"sync"
"time"
@@ -272,7 +271,7 @@ func (c *CommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) err
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
delay = min(delay, maxDelay)
}
return nil
}

View File

@@ -52,7 +52,7 @@ func create(leaves [][32]byte, depth uint64) MerkleTreeNode {
if depth == 0 {
return &LeafNode{hash: leaves[0]}
}
split := math.Min(math.PowerOf2(depth-1), length)
split := min(math.PowerOf2(depth-1), length)
left := create(leaves[0:split], depth-1)
right := create(leaves[split:], depth-1)
return &InnerNode{left: left, right: right}

View File

@@ -2,7 +2,6 @@ package cache
import (
"context"
"math"
"sync"
"time"
@@ -90,7 +89,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
delay = min(delay, maxDelay)
}
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))

View File

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -209,7 +208,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}
cfg := params.BeaconConfig()
modulo := math.Max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
modulo := max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
hashedSig := hash.Hash(sig)
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
}

View File

@@ -39,7 +39,6 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -42,6 +42,9 @@ func ProcessAttesterSlashings(
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process attester slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
@@ -59,6 +62,9 @@ func ProcessAttesterSlashing(
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil {
return nil, errors.New("exit info is required to process attester slashing")
}
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -34,7 +33,7 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
if err != nil {
return nil, err
}
validator.EffectiveBalance = math.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
validator.EffectiveBalance = min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
if validator.EffectiveBalance ==
params.BeaconConfig().MaxEffectiveBalance {
validator.ActivationEligibilityEpoch = 0

View File

@@ -55,6 +55,9 @@ func ProcessVoluntaryExits(
if len(exits) == 0 {
return beaconState, nil
}
if exitInfo == nil {
return nil, errors.New("exit info required to process voluntary exits")
}
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")

View File

@@ -51,6 +51,9 @@ func ProcessProposerSlashings(
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process proposer slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
@@ -75,6 +78,9 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)

View File

@@ -53,9 +53,15 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
var exitInfo *v.ExitInfo
hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
hasExits := len(bb.VoluntaryExits()) > 0
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -91,6 +92,18 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
if len(wrs) == 0 {
return st, nil
}
// It is correct to compute exitInfo once for all withdrawals in the block, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
var exitInfo *validators.ExitInfo
if st.Version() < version.Electra {
exitInfo = validators.ExitInformation(st)
} else {
// After Electra, the function InitiateValidatorExit ignores the exitInfo passed to it and recomputes it anyway.
exitInfo = &validators.ExitInfo{}
}
for _, wr := range wrs {
if wr == nil {
return nil, errors.New("nil execution layer withdrawal request")
@@ -148,7 +161,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
var err error
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
// exitInfo is updated within InitiateValidatorExit
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, exitInfo)
if err != nil {
return nil, err
}

View File

@@ -96,12 +96,17 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
}
// Process validators eligible for ejection.
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
if len(eligibleForEjection) > 0 {
// It is safe to compute exitInfo once for all ejections in the epoch, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
exitInfo := validators.ExitInformation(st)
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
}
}
@@ -228,7 +233,7 @@ func ProcessSlashings(st state.BeaconState) error {
// a callback is used here to apply the following actions to all validators
// below equally.
increment := params.BeaconConfig().EffectiveBalanceIncrement
minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)
minSlashing := min(totalSlashing*slashingMultiplier, totalBalance)
// Modified in Electra:EIP7251
var penaltyPerEffectiveBalanceIncrement uint64

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/math"
)
// ProcessSlashingsPrecompute processes the slashed validators during epoch processing.
@@ -21,7 +20,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
totalSlashing += slashing
}
minSlashing := math.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
minSlashing := min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
epochToWithdraw := currentEpoch + exitLength/2
var hasSlashing bool

View File

@@ -399,7 +399,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
defer span.End()
// Verify if the epoch is valid for assignment based on the provided state.
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
@@ -407,12 +406,15 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
if err != nil {
return nil, err
}
vals := make(map[primitives.ValidatorIndex]struct{})
// Deduplicate and make set for O(1) membership checks.
vals := make(map[primitives.ValidatorIndex]struct{}, len(validators))
for _, v := range validators {
vals[v] = struct{}{}
}
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
// Compute committee assignments for each slot in the epoch.
remaining := len(vals)
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment, len(vals))
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
committees, err := BeaconCommittees(ctx, state, slot)
if err != nil {
@@ -420,7 +422,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
}
for j, committee := range committees {
for _, vIndex := range committee {
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
if _, ok := vals[vIndex]; !ok {
continue
}
if _, ok := assignments[vIndex]; !ok {
@@ -429,6 +431,11 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
delete(vals, vIndex)
remaining--
if remaining == 0 {
return assignments, nil // early exit
}
}
}
}

View File

@@ -79,7 +79,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
}
// Spec defines `EffectiveBalanceIncrement` as min to avoid divisions by zero.
total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
total = max(params.BeaconConfig().EffectiveBalanceIncrement, total)
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
return 0, err
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
v1alpha1 "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
)
@@ -95,7 +94,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
if T*(200+3*D) < t*(200+12*D) {
epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
epochsForBalanceTopUps := N * (200 + 3*D) / (600 * Delta)
wsp += math.Max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
wsp += max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
} else {
wsp += 3 * N * D * t / (200 * Delta * (T - t))
}

View File

@@ -125,11 +125,12 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
}
func TestReconstructBlobs(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
require.NoError(t, kzg.Start())
var emptyBlock blocks.ROBlock
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
t.Run("no index", func(t *testing.T) {
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
@@ -190,10 +191,10 @@ func TestReconstructBlobs(t *testing.T) {
})
t.Run("not committed to the same block", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))
_, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
})

View File

@@ -16,61 +16,60 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fs := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
fuluMax := params.BeaconConfig().MaxBlobsPerBlock(fs)
t.Run("pre fulu", func(t *testing.T) {
block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, 0)
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.NoError(t, err)
})
t.Run("too many commitmnets", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.BlobSchedule = []params.BlobScheduleEntry{{}}
params.OverrideBeaconConfig(config)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
t.Run("too many commitments", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, fuluMax+1, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
})
t.Run("root mismatch", func(t *testing.T) {
_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrRootMismatch)
})
t.Run("column size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG commitments size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("KZG proofs mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})
t.Run("commitment mismatch", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})
t.Run("nominal", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.NoError(t, err)
})

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -378,9 +379,16 @@ func ProcessBlockForStateRoot(
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
// exitInfo is only needed for voluntary exits pre Electra.
hasExits := st.Version() < version.Electra && len(beaconBlock.Body().VoluntaryExits()) > 0
exitInfo := &validators.ExitInfo{}
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
@@ -407,10 +415,15 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
// This calls phase 0 block operations.
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
var exitInfo *v.ExitInfo
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {

View File

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -60,7 +59,7 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
_ = err
// Apply minimum balance as per spec
exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
exitInfo.TotalActiveBalance = max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}
@@ -98,7 +97,9 @@ func InitiateValidatorExit(
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}
if exitInfo == nil {
return nil, errors.New("exit info is required to process validator exit")
}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
@@ -177,6 +178,9 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitInfo == nil {
return errors.New("exit info is required to process validator exit")
}
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
@@ -235,7 +239,9 @@ func SlashValidator(
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error
if exitInfo == nil {
return nil, errors.New("exit info is required to slash validator")
}
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)

View File

@@ -18,13 +18,16 @@ import (
)
func Test_commitmentsToCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fulu := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
commits := [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
commits := make([][]byte, maxBlobs+1)
for i := 0; i < len(commits); i++ {
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
cases := []struct {
name string
@@ -47,41 +50,40 @@ func Test_commitmentsToCheck(t *testing.T) {
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Body.BlobKzgCommitments = commits
d.Block.Slot = 100
d := util.NewBeaconBlockFulu()
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
d.Block.Slot = fulu + 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
commits: commits,
slot: 100,
commits: commits[:maxBlobs],
slot: fulu + 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
slot: windowSlots + 1,
slot: fulu + windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Slot = 100
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu + 100
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
// Double the number of commitments, assert that this is over the limit
d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
@@ -115,67 +117,69 @@ func Test_commitmentsToCheck(t *testing.T) {
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[2]))
err := as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All but one persisted, return missing idx
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err = as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All persisted, return nil
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.Persist(ds, blobSidecars...))
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
require.NoError(t, as.IsDataAvailable(ctx, ds, blk))
}
func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err := as.IsDataAvailable(ctx, ds, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}
func TestLazyPersistOnceCommitted(t *testing.T) {
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)
// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
require.ErrorIs(t, as.Persist(ds, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 4)
// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
slotOOB := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
slotOOB += ds + 32
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))
// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, moreBlobSidecars...))
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
}
type mockBlobBatchVerifier struct {

View File

@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry := &blobCacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sumz := filesystem.NewMockBlobStorageSummarizer(t, slots.ToEpoch(slot), od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}

View File

@@ -21,7 +21,8 @@ import (
)
func TestBlobStorage_SaveBlobData(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)
t.Run("no error for duplicate", func(t *testing.T) {
@@ -127,21 +128,22 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}
func TestBlobIndicesBounds(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fs := afero.NewMemMapFs()
root := [32]byte{}
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
writeFakeSSZ(t, fs, root, 0, okIdx)
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es)) - 1
writeFakeSSZ(t, fs, root, es, okIdx)
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
indices := bs.Summary(root).mask
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
writeFakeSSZ(t, fs, root, 0, oobIdx)
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es))
writeFakeSSZ(t, fs, root, es, oobIdx)
// This now fails at cache warmup time.
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}

View File

@@ -6,14 +6,17 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
func TestSlotByRoot_Summary(t *testing.T) {
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
ee := params.BeaconConfig().ElectraForkEpoch
es := util.SlotAtEpoch(t, ee)
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
@@ -53,7 +56,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
sc.cache[key] = BlobStorageSummary{epoch: ee, mask: c.expected}
}
}
for _, c := range cases {
@@ -73,6 +76,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
}
func TestAllAvailable(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
@@ -125,13 +129,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
count: params.BeaconConfig().MaxBlobsPerBlock(es) + 1,
aa: false,
},
{
name: "max present",
count: params.BeaconConfig().MaxBlobsPerBlock(0),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
count: params.BeaconConfig().MaxBlobsPerBlock(es),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(es)),
aa: true,
},
{
@@ -143,7 +147,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
for _, idx := range c.idxSet {
mask[idx] = true
}

View File

@@ -11,6 +11,7 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -60,12 +61,13 @@ func TestRootFromDir(t *testing.T) {
}
func TestSlotFromFile(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
cases := []struct {
slot primitives.Slot
}{
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: es + 0},
{slot: es + 2},
{slot: es + 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
@@ -243,39 +245,40 @@ func TestSlotFromBlob(t *testing.T) {
}
func TestIterationComplete(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
targets := []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
path: "by-epoch/%d/%d/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 0),
slotOffset: 16,
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 1),
slotOffset: 16,
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", -1+math.MaxUint64/32, 0),
slotOffset: 16,
path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
path: "by-epoch/%d/%d/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
path: "by-epoch/%d/%d/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
},
}
fs := afero.NewMemMapFs()
@@ -299,6 +302,7 @@ func TestIterationComplete(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, tar.ident.epoch, entry.epoch)
require.Equal(t, true, entry.HasIndex(tar.ident.index))
require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
path := fmt.Sprintf(tar.path, periodForEpoch(tar.ident.epoch), tar.ident.epoch)
require.Equal(t, path, byEpoch.sszPath(tar.ident))
}
}

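The migrated targets above now format their expected paths at assertion time, filling the two %d placeholders with the period and the epoch (see the fmt.Sprintf call added to the assertion). A minimal sketch of that formatting, assuming the period is simply the epoch divided by 4096 as the old hard-coded fixtures (epoch 5330 -> period 1, epoch 16777216 -> period 4096) suggest; the constant and helper below are illustrative stand-ins, not the package's definitions.

package main

import "fmt"

// epochsPerPeriod is an assumption inferred from the old fixtures above; the real
// constant lives in the blob storage layout code.
const epochsPerPeriod = 4096

// periodForEpoch mirrors the bucketing the rewritten assertions rely on.
func periodForEpoch(epoch uint64) uint64 { return epoch / epochsPerPeriod }

func main() {
	// tmpl has the same shape as the by-epoch templates above, with a placeholder root.
	tmpl := "by-epoch/%d/%d/0xROOT/0.ssz"
	epoch := uint64(5330)
	fmt.Printf(tmpl+"\n", periodForEpoch(epoch), epoch) // by-epoch/1/5330/0xROOT/0.ssz
}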

@@ -4,10 +4,10 @@ import (
"os"
"testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/spf13/afero"
)
@@ -18,9 +18,7 @@ func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64)
}
func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
slot, err := slots.EpochStart(ident.epoch)
require.NoError(t, err)
slot += offset
slot := util.SlotAtEpoch(t, ident.epoch) + offset
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
scb, err := sc[0].MarshalSSZ()
require.NoError(t, err)
@@ -53,6 +51,7 @@ func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, af
}
func TestMigrations(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
cases := []struct {
name string
forwardLayout string
@@ -65,18 +64,18 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
},
},
@@ -87,33 +86,33 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 1),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", de+16777217, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
migrated: true,
},
},


@@ -88,11 +88,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
func NewMockBlobStorageSummarizer(t *testing.T, epoch primitives.Epoch, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
t.Fatal(err)
}
}


@@ -142,6 +142,7 @@ func testRoots(n int) [][32]byte {
}
func TestLayoutPruneBefore(t *testing.T) {
electra := params.BeaconConfig().ElectraForkEpoch
roots := testRoots(10)
cases := []struct {
name string
@@ -153,27 +154,27 @@ func TestLayoutPruneBefore(t *testing.T) {
}{
{
name: "none pruned",
pruneBefore: 1,
pruneBefore: electra + 1,
pruned: []testIdent{},
remain: []testIdent{
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 0}},
},
},
{
name: "expected pruned before epoch",
pruneBefore: 3,
pruneBefore: electra + 3,
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: electra + 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: electra + 2, index: 3}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: electra + 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: electra + 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: electra + 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: electra + 4, index: 5}},
},
sum: pruneSummary{blobsPruned: 4},
},


@@ -954,7 +954,9 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
deletedRoots := make([][32]byte, 0)
oRoot, err := s.OriginCheckpointBlockRoot(ctx)
if err != nil {
if err != nil && !errors.Is(err, ErrNotFoundOriginBlockRoot) {
// If the node did not use checkpoint sync, there will be no origin block root.
// Use the zero hash, which will never match any actual state root.
return err
}

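The guard above is the usual errors.Is pattern for tolerating one specific sentinel ("origin block root not set") while still failing on unexpected errors. A self-contained sketch of the same shape; the sentinel and lookup below are stand-ins, not the store's API.

package main

import (
	"errors"
	"fmt"
)

// errNotFoundOriginBlockRoot is a stand-in sentinel, not the database package's value.
var errNotFoundOriginBlockRoot = errors.New("origin block root not set")

// originRoot simulates a node that never stored an origin checkpoint root.
func originRoot() ([32]byte, error) {
	return [32]byte{}, errNotFoundOriginBlockRoot
}

func cleanUp() error {
	oRoot, err := originRoot()
	if err != nil && !errors.Is(err, errNotFoundOriginBlockRoot) {
		return err // only unexpected errors abort the cleanup
	}
	// With no origin root, oRoot stays the zero hash and simply never matches a real block root.
	fmt.Printf("origin root: %x\n", oRoot)
	return nil
}

func main() {
	fmt.Println(cleanUp()) // <nil>
}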

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -1283,3 +1284,50 @@ func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b,
func BenchmarkState_CheckStateReadTime_1(b *testing.B) { checkStateReadTime(b, 1) }
func BenchmarkState_CheckStateReadTime_10(b *testing.B) { checkStateReadTime(b, 10) }
func TestStore_CleanUpDirtyStates_NoOriginRoot(t *testing.T) {
// This test verifies that CleanUpDirtyStates does not fail when the origin block root is not set,
// which can happen when starting from genesis or in certain fork scenarios like Fulu.
db := setupDB(t)
genesisState, err := util.NewBeaconState()
require.NoError(t, err)
genesisRoot := [fieldparams.RootLength]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
// Note: We intentionally do NOT call SaveOriginCheckpointBlockRoot here
// to simulate the scenario where origin block root is not set
slotsPerArchivedPoint := primitives.Slot(128)
bRoots := make([][fieldparams.RootLength]byte, 0)
prevRoot := genesisRoot
for i := primitives.Slot(1); i <= slotsPerArchivedPoint; i++ { // skip slot 0
b := util.NewBeaconBlock()
b.Block.Slot = i
b.Block.ParentRoot = prevRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(t.Context(), wsb))
bRoots = append(bRoots, r)
prevRoot = r
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(i))
require.NoError(t, db.SaveState(t.Context(), st, r))
}
require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), &ethpb.Checkpoint{
Root: bRoots[len(bRoots)-1][:],
Epoch: primitives.Epoch(slotsPerArchivedPoint / params.BeaconConfig().SlotsPerEpoch),
}))
// This should not fail even though origin block root is not set
err = db.CleanUpDirtyStates(t.Context(), slotsPerArchivedPoint)
require.NoError(t, err)
// Verify that cleanup still works correctly
for i, root := range bRoots {
if primitives.Slot(i) >= slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3)) {
require.Equal(t, true, db.HasState(t.Context(), root))
} else {
require.Equal(t, false, db.HasState(t.Context(), root))
}
}
}


@@ -254,6 +254,7 @@ func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaco
return updatesByPeriod, nil
}
// SetLastFinalityUpdate should be used only for testing.
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -263,9 +264,11 @@ func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdat
func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
go func() {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
}()
}
s.lastFinalityUpdate = update
@@ -283,6 +286,7 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
return s.lastFinalityUpdate
}
// SetLastOptimisticUpdate should be used only for testing.
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -292,9 +296,11 @@ func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticU
func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
if broadcast {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
go func() {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
}()
}
s.lastOptimisticUpdate = update

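Moving the broadcast into a goroutine means setting the cached update no longer blocks on the network while the store's mutex is held. A minimal sketch of that fire-and-forget shape with a stand-in broadcaster; the real code calls the p2p interface and logs failures the same way.

package main

import (
	"fmt"
	"sync"
	"time"
)

type store struct {
	mu   sync.Mutex
	last string
}

// set stores the update immediately and pushes the potentially slow broadcast into a
// goroutine, so the caller never waits on the network while the mutex is held.
func (s *store) set(update string, broadcast func(string) error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if broadcast != nil {
		go func() {
			if err := broadcast(update); err != nil {
				fmt.Println("could not broadcast:", err)
			}
		}()
	}
	s.last = update
}

func main() {
	s := &store{}
	s.set("finality-update-1", func(u string) error {
		time.Sleep(10 * time.Millisecond) // simulated network latency
		fmt.Println("broadcast", u)
		return nil
	})
	time.Sleep(50 * time.Millisecond) // give the goroutine time to finish, as the updated tests do
	fmt.Println("last update:", s.last)
}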

@@ -3,6 +3,7 @@ package light_client
import (
"context"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
@@ -74,6 +75,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
p2p := p2pTesting.NewTestP2P(t)
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
timeForGoroutinesToFinish := 20 * time.Microsecond
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
l0 := util.NewTestLightClient(t, version.Altair)
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
@@ -85,6 +87,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update0, true)
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
p2p.BroadcastCalled.Store(false) // Reset for next test
@@ -99,6 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update1, true)
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test
@@ -113,6 +117,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update2, true)
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test
@@ -127,6 +132,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update3, true)
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
@@ -140,6 +146,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update4, true)
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
p2p.BroadcastCalled.Store(false) // Reset for next test
@@ -154,6 +161,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update5, true)
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
@@ -167,6 +175,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update6, true)
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}


@@ -1,7 +1,6 @@
package blstoexec
import (
"math"
"sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -87,7 +86,7 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
func (p *Pool) BLSToExecChangesForInclusion(st state.ReadOnlyBeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
p.lock.RLock()
defer p.lock.RUnlock()
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
length := int(min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
node := p.pending.Last()
for node != nil && len(result) < length {

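Go 1.21's predeclared min and max are generic over ordered types, so the float64 round-trip kept here from the math.Min version is no longer strictly necessary. A tiny sketch of the more direct form, with stand-in values for the config limit and pending length.

package main

import "fmt"

func main() {
	maxChanges := uint64(16) // stand-in for params.BeaconConfig().MaxBlsToExecutionChanges
	pending := 42            // stand-in for p.pending.Len()

	// The builtin min compares two values of the same ordered type, so a single int
	// conversion replaces the float64 round-trip inherited from math.Min.
	length := min(int(maxChanges), pending)
	fmt.Println(length) // 16
}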

@@ -1,7 +1,6 @@
package voluntaryexits
import (
"math"
"sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -63,7 +62,7 @@ func (p *Pool) PendingExits() ([]*ethpb.SignedVoluntaryExit, error) {
// return more than the block enforced MaxVoluntaryExits.
func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
p.lock.RLock()
length := int(math.Min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
length := int(min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
result := make([]*ethpb.SignedVoluntaryExit, 0, length)
node := p.pending.First()
for node != nil && len(result) < length {


@@ -139,6 +139,7 @@ go_test(
"sender_test.go",
"service_test.go",
"subnets_test.go",
"topics_test.go",
"utils_test.go",
],
embed = [":go_default_library"],


@@ -278,6 +278,20 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return errors.New("attempted to broadcast nil light client optimistic update")
}
// add delay to ensure block has time to propagate
slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
if err != nil {
err := errors.Wrap(err, "could not compute slot start time")
tracing.AnnotateError(span, err)
return err
}
timeSinceSlotStart := time.Since(slotStart)
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
if timeSinceSlotStart < expectedDelay {
waitDuration := expectedDelay - timeSinceSlotStart
<-time.After(waitDuration)
}
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
@@ -298,6 +312,20 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return errors.New("attempted to broadcast nil light client finality update")
}
// add delay to ensure block has time to propagate
slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
if err != nil {
err := errors.Wrap(err, "could not compute slot start time")
tracing.AnnotateError(span, err)
return err
}
timeSinceSlotStart := time.Since(slotStart)
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
if timeSinceSlotStart < expectedDelay {
waitDuration := expectedDelay - timeSinceSlotStart
<-time.After(waitDuration)
}
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")

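The added wait holds the gossip until a configured fraction of the slot has passed so the corresponding block has time to propagate first. Interpreting SyncMessageDueBPS as basis points of the slot, which is consistent with the ~72 ms noted in the tests, 60 BPS of a 12-second mainnet slot is 12 s * 60 / 10000 = 72 ms. A minimal sketch of the wait with those stand-in numbers; it is not the slots package implementation.

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot    = 12 // mainnet slot length, used here only for illustration
	syncMessageDueBPS = 60 // basis points of a slot: 12s * 60 / 10000 = 72ms
)

// waitForDuePoint blocks until the due offset within the slot has elapsed, mirroring the
// shape of the delay added above (slot start time plus the expected delay).
func waitForDuePoint(slotStart time.Time) {
	expectedDelay := time.Duration(secondsPerSlot) * time.Second * syncMessageDueBPS / 10000
	if since := time.Since(slotStart); since < expectedDelay {
		<-time.After(expectedDelay - since)
	}
}

func main() {
	slotStart := time.Now().Add(-20 * time.Millisecond) // pretend the slot started 20ms ago
	waitForDuePoint(slotStart)
	fmt.Println("due point reached", time.Since(slotStart), "after slot start")
}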

@@ -20,6 +20,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -529,6 +530,11 @@ func TestService_BroadcastBlob(t *testing.T) {
}
func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.SyncMessageDueBPS = 60 // ~72 milliseconds
params.OverrideBeaconConfig(config)
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
@@ -539,7 +545,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisTime: time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
@@ -566,12 +572,19 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
defer cancel()
incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)
slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
require.NoError(t, err)
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
if time.Now().Before(slotStartTime.Add(expectedDelay)) {
tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
}
result := &ethpb.LightClientOptimisticUpdateAltair{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
if !proto.Equal(result, msg.Proto()) {
@@ -593,6 +606,11 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
}
func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.SyncMessageDueBPS = 60 // ~72 milliseconds
params.OverrideBeaconConfig(config)
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
@@ -603,7 +621,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisTime: time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
@@ -630,12 +648,19 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
defer cancel()
incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)
slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
require.NoError(t, err)
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
if time.Now().Before(slotStartTime.Add(expectedDelay)) {
tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
}
result := &ethpb.LightClientFinalityUpdateAltair{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
if !proto.Equal(result, msg.Proto()) {


@@ -1,6 +1,8 @@
package p2p
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -10,32 +12,28 @@ import (
"github.com/sirupsen/logrus"
)
var errNoCustodyInfo = errors.New("no custody info available")
var _ CustodyManager = (*Service)(nil)
// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
s.custodyInfoLock.RLock()
defer s.custodyInfoLock.RUnlock()
if s.custodyInfo == nil {
return 0, errors.New("no custody info available")
// It blocks until the custody info is set or the context is done.
func (s *Service) EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error) {
custodyInfo, err := s.waitForCustodyInfo(ctx)
if err != nil {
return 0, errors.Wrap(err, "wait for custody info")
}
return s.custodyInfo.earliestAvailableSlot, nil
return custodyInfo.earliestAvailableSlot, nil
}
// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() (uint64, error) {
s.custodyInfoLock.Lock()
defer s.custodyInfoLock.Unlock()
if s.custodyInfo == nil {
return 0, errNoCustodyInfo
// It blocks until the custody info is set or the context is done.
func (s *Service) CustodyGroupCount(ctx context.Context) (uint64, error) {
custodyInfo, err := s.waitForCustodyInfo(ctx)
if err != nil {
return 0, errors.Wrap(err, "wait for custody info")
}
return s.custodyInfo.groupCount, nil
return custodyInfo.groupCount, nil
}
// UpdateCustodyInfo updates the stored custody group count to the incoming one
@@ -79,6 +77,9 @@ func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
earliestAvailableSlot: earliestAvailableSlot,
groupCount: custodyGroupCount,
}
close(s.custodyInfoSet)
return earliestAvailableSlot, custodyGroupCount, nil
}
@@ -147,6 +148,33 @@ func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
return custodyCount
}
func (s *Service) waitForCustodyInfo(ctx context.Context) (custodyInfo, error) {
select {
case <-s.custodyInfoSet:
info, ok := s.copyCustodyInfo()
if !ok {
return custodyInfo{}, errors.New("custody info was set but is nil")
}
return info, nil
case <-ctx.Done():
return custodyInfo{}, ctx.Err()
}
}
// copyCustodyInfo returns a copy of the current custody info in a thread-safe manner.
// If no custody info is set, it returns false as the second return value.
func (s *Service) copyCustodyInfo() (custodyInfo, bool) {
s.custodyInfoLock.RLock()
defer s.custodyInfoLock.RUnlock()
if s.custodyInfo == nil {
return custodyInfo{}, false
}
return *s.custodyInfo, true
}
// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
// If the ENR is not available, it defaults to the minimum number of custody groups
// an honest node custodies and serves samples from.

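custodyInfoSet is used as a one-shot latch: it is closed once when the custody info is first written, and every reader selects on it together with ctx.Done(), so waiters block until the value exists or their context ends. A compact sketch of that pattern in isolation; field names and types are simplified stand-ins, and sync.Once stands in for whatever guards the single close in the real service.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type service struct {
	mu      sync.RWMutex
	set     chan struct{} // closed exactly once when the value is first written
	setOnce sync.Once
	count   uint64
}

func (s *service) update(count uint64) {
	s.mu.Lock()
	s.count = count
	s.mu.Unlock()
	s.setOnce.Do(func() { close(s.set) }) // release every waiter
}

// groupCount blocks until the value is set or the context is done, like CustodyGroupCount above.
func (s *service) groupCount(ctx context.Context) (uint64, error) {
	select {
	case <-s.set:
		s.mu.RLock()
		defer s.mu.RUnlock()
		return s.count, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func main() {
	s := &service{set: make(chan struct{})}
	go func() { time.Sleep(10 * time.Millisecond); s.update(8) }()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(s.groupCount(ctx)) // 8 <nil>
}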

@@ -20,58 +20,37 @@ import (
)
func TestEarliestAvailableSlot(t *testing.T) {
t.Run("No custody info available", func(t *testing.T) {
service := &Service{
custodyInfo: nil,
}
const expected primitives.Slot = 100
_, err := service.EarliestAvailableSlot()
service := &Service{
custodyInfoSet: make(chan struct{}),
custodyInfo: &custodyInfo{
earliestAvailableSlot: expected,
},
}
require.NotNil(t, err)
})
close(service.custodyInfoSet)
slot, err := service.EarliestAvailableSlot(t.Context())
t.Run("Valid custody info", func(t *testing.T) {
const expected primitives.Slot = 100
service := &Service{
custodyInfo: &custodyInfo{
earliestAvailableSlot: expected,
},
}
slot, err := service.EarliestAvailableSlot()
require.NoError(t, err)
require.Equal(t, expected, slot)
})
require.NoError(t, err)
require.Equal(t, expected, slot)
}
func TestCustodyGroupCount(t *testing.T) {
t.Run("No custody info available", func(t *testing.T) {
service := &Service{
custodyInfo: nil,
}
const expected uint64 = 5
_, err := service.CustodyGroupCount()
service := &Service{
custodyInfoSet: make(chan struct{}),
custodyInfo: &custodyInfo{
groupCount: expected,
},
}
require.NotNil(t, err)
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
})
close(service.custodyInfoSet)
count, err := service.CustodyGroupCount(t.Context())
t.Run("Valid custody info", func(t *testing.T) {
const expected uint64 = 5
service := &Service{
custodyInfo: &custodyInfo{
groupCount: expected,
},
}
count, err := service.CustodyGroupCount()
require.NoError(t, err)
require.Equal(t, expected, count)
})
require.NoError(t, err)
require.Equal(t, expected, count)
}
func TestUpdateCustodyInfo(t *testing.T) {
@@ -163,7 +142,8 @@ func TestUpdateCustodyInfo(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
service := &Service{
custodyInfo: tc.initialCustodyInfo,
custodyInfoSet: make(chan struct{}),
custodyInfo: tc.initialCustodyInfo,
}
slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)


@@ -253,7 +253,7 @@ func (s *Service) RefreshPersistentSubnets() {
return
}
custodyGroupCount, err = s.CustodyGroupCount()
custodyGroupCount, err = s.CustodyGroupCount(s.ctx)
if err != nil {
log.WithError(err).Error("Could not retrieve custody group count")
return
@@ -604,27 +604,13 @@ func (s *Service) createLocalNode(
localNode = initializeSyncCommSubnets(localNode)
if params.FuluEnabled() {
// TODO: Replace this quick fix with a proper synchronization scheme (chan?)
const delay = 1 * time.Second
var custodyGroupCount uint64
err := errNoCustodyInfo
for errors.Is(err, errNoCustodyInfo) {
custodyGroupCount, err = s.CustodyGroupCount()
if errors.Is(err, errNoCustodyInfo) {
log.WithField("delay", delay).Debug("No custody info available yet, retrying later")
time.Sleep(delay)
continue
}
if err != nil {
return nil, errors.Wrap(err, "retrieve custody group count")
}
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
localNode.Set(custodyGroupCountEntry)
custodyGroupCount, err := s.CustodyGroupCount(s.ctx)
if err != nil {
return nil, errors.Wrap(err, "could not retrieve custody group count")
}
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
localNode.Set(custodyGroupCountEntry)
}
if s.cfg != nil && s.cfg.HostAddress != "" {

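With CustodyGroupCount now blocking until custody info exists, a caller that must not wait forever can bound the call with a derived context rather than the removed sleep-and-retry loop. A small usage sketch under that assumption; the interface and helper below are illustrative, not Prysm APIs.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// custodyCounter is a stand-in for the slice of the p2p service used here.
type custodyCounter interface {
	CustodyGroupCount(ctx context.Context) (uint64, error)
}

// groupCountWithTimeout bounds the now-blocking call so a caller that cannot wait
// indefinitely still gets a clear timeout error instead of hanging.
func groupCountWithTimeout(parent context.Context, c custodyCounter, d time.Duration) (uint64, error) {
	ctx, cancel := context.WithTimeout(parent, d)
	defer cancel()
	count, err := c.CustodyGroupCount(ctx)
	if errors.Is(err, context.DeadlineExceeded) {
		return 0, fmt.Errorf("custody info not available after %s: %w", d, err)
	}
	return count, err
}

// neverReady simulates a service whose custody info is never set.
type neverReady struct{}

func (neverReady) CustodyGroupCount(ctx context.Context) (uint64, error) {
	<-ctx.Done()
	return 0, ctx.Err()
}

func main() {
	_, err := groupCountWithTimeout(context.Background(), neverReady{}, 50*time.Millisecond)
	fmt.Println(err)
}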

@@ -281,9 +281,13 @@ func TestCreateLocalNode(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: tt.cfg,
ctx: t.Context(),
custodyInfo: &custodyInfo{groupCount: custodyRequirement},
custodyInfoSet: make(chan struct{}),
}
close(service.custodyInfoSet)
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
if tt.expectedError {
require.NotNil(t, err)
@@ -912,9 +916,13 @@ func TestRefreshPersistentSubnets(t *testing.T) {
peers: p2p.Peers(),
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
ctx: t.Context(),
custodyInfoSet: make(chan struct{}),
custodyInfo: &custodyInfo{groupCount: custodyGroupCount},
}
close(service.custodyInfoSet)
// Set the listener and the metadata.
createListener := func() (*discover.UDPv5, error) {
return service.createListener(nil, privateKey)


@@ -79,6 +79,9 @@ func compareForkENR(self, peer *enr.Record) error {
// we allow the connection to continue until the fork boundary.
return nil
}
if selfEntry.NextForkEpoch == params.BeaconConfig().FarFutureEpoch {
return nil
}
// Since we agree on the next fork epoch, we require next fork version to also be in agreement.
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {


@@ -122,6 +122,29 @@ func TestCompareForkENR(t *testing.T) {
}
}
func TestIgnoreFarFutureMismatch(t *testing.T) {
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
current := params.GetNetworkScheduleEntry(params.BeaconConfig().ElectraForkEpoch)
next := params.NetworkScheduleEntry{
Epoch: params.BeaconConfig().FarFutureEpoch,
ForkDigest: [4]byte{0xFF, 0xFF, 0xFF, 0xFF}, // Ensure a unique digest for testing.
ForkVersion: [4]byte{0xFF, 0xFF, 0xFF, 0xFF},
}
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))
peerNext := params.NetworkScheduleEntry{
Epoch: params.BeaconConfig().FarFutureEpoch,
ForkDigest: [4]byte{0xAA, 0xAA, 0xAA, 0xAA}, // Different unique digest for testing.
ForkVersion: [4]byte{0xAA, 0xAA, 0xAA, 0xAA},
}
peer := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(peer, current, peerNext))
require.NoError(t, compareForkENR(self.Node().Record(), peer.Node().Record()))
}
func TestNfdSetAndLoad(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096


@@ -123,8 +123,8 @@ type (
// CustodyManager abstracts some data columns related methods.
CustodyManager interface {
EarliestAvailableSlot() (primitives.Slot, error)
CustodyGroupCount() (uint64, error)
EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error)
CustodyGroupCount(ctx context.Context) (uint64, error)
UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
CustodyGroupCountFromPeer(peer.ID) uint64
}


@@ -198,9 +198,11 @@ func (s *Service) updateMetrics() {
overallScore := s.peers.Scorers().Score(pid)
peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
}
connectedPeersCount.Reset() // Clear out previous results.
for agent, total := range numConnectedPeersByClient {
connectedPeersCount.WithLabelValues(agent).Set(total)
}
avgScoreConnectedClients.Reset() // Clear out previous results.
for agent, scoringData := range peerScoresByClient {
avgScore := average(scoringData)
avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)

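Calling Reset on the gauge vectors before repopulating them is what actually clears label series for clients that no longer have any connected peers; without it, an agent that drops to zero keeps reporting its last value. A small sketch of the same pattern with the Prometheus Go client; the metric name below is illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var connectedPeers = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{Name: "example_connected_peers", Help: "Connected peers by client."},
	[]string{"agent"},
)

func updateMetrics(countsByAgent map[string]float64) {
	// Without Reset, an agent that drops to zero connected peers would keep its old value forever.
	connectedPeers.Reset()
	for agent, total := range countsByAgent {
		connectedPeers.WithLabelValues(agent).Set(total)
	}
}

func main() {
	prometheus.MustRegister(connectedPeers)
	updateMetrics(map[string]float64{"prysm": 12, "lighthouse": 7})
	updateMetrics(map[string]float64{"prysm": 11}) // lighthouse series cleared by Reset
	fmt.Println("metrics updated")
}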

@@ -24,7 +24,6 @@ package peers
import (
"context"
"math"
"net"
"sort"
"strings"
@@ -411,7 +410,7 @@ func (p *Status) RandomizeBackOff(pid peer.ID) {
return
}
duration := time.Duration(math.Max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
duration := time.Duration(max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
peerData.NextValidTime = time.Now().Add(duration)
}


@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
mathutil "github.com/OffchainLabs/prysm/v6/math"
pbrpc "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -135,13 +134,15 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
// pubsubOptions creates a list of options to configure our router with.
func (s *Service) pubsubOptions() []pubsub.Option {
filt := pubsub.NewAllowlistSubscriptionFilter(s.allTopicStrings()...)
filt = pubsub.WrapLimitSubscriptionFilter(filt, pubsubSubscriptionRequestLimit)
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
pubsub.WithNoAuthor(),
pubsub.WithMessageIdFn(func(pmsg *pubsubpb.Message) string {
return MsgID(s.genesisValidatorsRoot, pmsg)
}),
pubsub.WithSubscriptionFilter(s),
pubsub.WithSubscriptionFilter(filt),
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
pubsub.WithMaxMessageSize(int(MaxMessageSize())), // lint:ignore uintcast -- Max Message Size is a config value and is naturally bounded by networking limitations.
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),
@@ -246,5 +247,5 @@ func ExtractGossipDigest(topic string) ([4]byte, error) {
// # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small.
// return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024)
func MaxMessageSize() uint64 {
return mathutil.Max(encoder.MaxCompressedLen(params.BeaconConfig().MaxPayloadSize)+1024, 1024*1024)
return max(encoder.MaxCompressedLen(params.BeaconConfig().MaxPayloadSize)+1024, 1024*1024)
}


@@ -130,7 +130,7 @@ func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
log.WithError(err).WithFields(logrus.Fields{
"peerID": pid,
"topic": topic,
}).Debug("Peer subscription rejected")
}).Trace("Peer subscription rejected")
}
return false
}


@@ -91,6 +91,7 @@ type Service struct {
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
custodyInfoSet chan struct{}
allForkDigests map[[4]byte]struct{}
}
@@ -137,6 +138,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
peerDisconnectionTime: cache.New(1*time.Second, 1*time.Minute),
custodyInfoSet: make(chan struct{}),
}
ipAddr := prysmnetwork.IPAddr()


@@ -25,6 +25,7 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/sirupsen/logrus"
)
var (
@@ -223,8 +224,14 @@ func (s *Service) findPeersWithSubnets(
// Skip nodes that are not subscribed to any of the defective subnets.
nodeSubnets, err := filter(node)
if err != nil {
return nil, errors.Wrap(err, "filter node")
log.WithError(err).WithFields(logrus.Fields{
"nodeID": node.ID(),
"topicFormat": topicFormat,
}).Debug("Could not get needed subnets from peer")
continue
}
if len(nodeSubnets) == 0 {
continue
}

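The loop above now treats a failing filter as information about one peer rather than a reason to abandon the whole search: log, skip, keep scanning. A generic sketch of that log-and-continue shape with stand-in node and filter types.

package main

import (
	"errors"
	"fmt"
)

type node struct{ id string }

// usableNodes keeps scanning when filter fails for a single node, mirroring the
// log-and-continue change above instead of returning on the first bad peer.
func usableNodes(nodes []node, filter func(node) ([]uint64, error)) []node {
	kept := make([]node, 0, len(nodes))
	for _, n := range nodes {
		subnets, err := filter(n)
		if err != nil {
			fmt.Printf("skipping node %s: %v\n", n.id, err) // a Debug log in the real service
			continue
		}
		if len(subnets) == 0 {
			continue // not subscribed to anything we need
		}
		kept = append(kept, n)
	}
	return kept
}

func main() {
	nodes := []node{{"a"}, {"b"}, {"c"}}
	kept := usableNodes(nodes, func(n node) ([]uint64, error) {
		if n.id == "b" {
			return nil, errors.New("malformed ENR entry")
		}
		return []uint64{1}, nil
	})
	fmt.Println(len(kept)) // 2
}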

@@ -199,12 +199,12 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
}
// EarliestAvailableSlot -- fake.
func (*FakeP2P) EarliestAvailableSlot() (primitives.Slot, error) {
func (*FakeP2P) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
return 0, nil
}
// CustodyGroupCount -- fake.
func (*FakeP2P) CustodyGroupCount() (uint64, error) {
func (*FakeP2P) CustodyGroupCount(context.Context) (uint64, error) {
return 0, nil
}


@@ -473,7 +473,7 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
}
// EarliestAvailableSlot .
func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
func (s *TestP2P) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
s.custodyInfoMut.RLock()
defer s.custodyInfoMut.RUnlock()
@@ -481,7 +481,7 @@ func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
}
// CustodyGroupCount .
func (s *TestP2P) CustodyGroupCount() (uint64, error) {
func (s *TestP2P) CustodyGroupCount(context.Context) (uint64, error) {
s.custodyInfoMut.RLock()
defer s.custodyInfoMut.RUnlock()


@@ -1,5 +1,15 @@
package p2p
import (
"encoding/hex"
"slices"
"strconv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)
const (
// GossipProtocolAndDigest represents the protocol and fork digest prefix in a gossip topic.
GossipProtocolAndDigest = "/eth2/%x/"
@@ -66,3 +76,129 @@ const (
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)
// topic is a struct representing a single gossipsub topic.
// It can also be used to represent a set of subnet topics: see appendSubnetsBelow().
// topic is intended to be used as an immutable value - it is hashable so it can be used as a map key
// and it uses strings in order to leverage Go's string interning for memory efficiency.
type topic struct {
full string
digest string
message string
start primitives.Epoch
end primitives.Epoch
suffix string
subnet uint64
}
func (t topic) String() string {
return t.full
}
// sszEnc is used to get the protocol suffix for topics. This value has been effectively hardcoded
// since phase0.
var sszEnc = &encoder.SszNetworkEncoder{}
// newTopic constructs a topic value for an ordinary topic structure (without subnets).
func newTopic(start, end primitives.Epoch, digest [4]byte, message string) topic {
suffix := sszEnc.ProtocolSuffix()
t := topic{digest: hex.EncodeToString(digest[:]), message: message, start: start, end: end, suffix: suffix}
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + t.suffix
return t
}
// newSubnetTopic constructs a topic value for a topic with a subnet structure.
func newSubnetTopic(start, end primitives.Epoch, digest [4]byte, message string, subnet uint64) topic {
t := newTopic(start, end, digest, message)
t.subnet = subnet
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + "_" + strconv.Itoa(int(t.subnet)) + t.suffix
return t
}
// allTopicStrings returns the full topic string for all topics
// that could be derived from the current fork schedule.
func (s *Service) allTopicStrings() []string {
topics := s.allTopics()
topicStrs := make([]string, 0, len(topics))
for _, t := range topics {
topicStrs = append(topicStrs, t.String())
}
return topicStrs
}
// appendSubnetsBelow uses the value of top.subnet as the subnet count
// and creates a topic value for each subnet less than the subnet count, appending them all
// to appendTo.
func appendSubnetsBelow(top topic, digest [4]byte, appendTo []topic) []topic {
for i := range top.subnet {
appendTo = append(appendTo, newSubnetTopic(top.start, top.end, digest, top.message, i))
}
return appendTo
}
// allTopics returns all topics that could be derived from the current fork schedule.
func (s *Service) allTopics() []topic {
cfg := params.BeaconConfig()
// bellatrix: no special topics; electra: blob topics are handled all together below
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
// Templates are starter topics - they have a placeholder digest, and subnet templates set the subnet field to the
// maximum subnet count (see how this is used in appendSubnetsBelow). Templates are not returned directly by the method;
// they are copied and modified for each digest where they apply, based on their start and end epochs.
empty := [4]byte{0, 0, 0, 0} // empty digest for templates, replaced by real digests in per-fork copies.
templates := []topic{
newTopic(genesis, future, empty, GossipBlockMessage),
newTopic(genesis, future, empty, GossipAggregateAndProofMessage),
newTopic(genesis, future, empty, GossipExitMessage),
newTopic(genesis, future, empty, GossipProposerSlashingMessage),
newTopic(genesis, future, empty, GossipAttesterSlashingMessage),
newSubnetTopic(genesis, future, empty, GossipAttestationMessage, cfg.AttestationSubnetCount),
newSubnetTopic(altair, future, empty, GossipSyncCommitteeMessage, cfg.SyncCommitteeSubnetCount),
newTopic(altair, future, empty, GossipContributionAndProofMessage),
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}
for next := params.NextNetworkScheduleEntry(last.Epoch); next.ForkDigest != last.ForkDigest; next = params.NextNetworkScheduleEntry(next.Epoch) {
schedule = append(schedule, next)
last = next
}
slices.Reverse(schedule) // reverse the fork schedule because it simplifies dealing with BPOs
fullTopics := make([]topic, 0, len(templates))
for _, top := range templates {
for _, entry := range schedule {
if top.start <= entry.Epoch && entry.Epoch < top.end {
if top.subnet > 0 { // subnet topics in the list above should set this value to the max subnet count: see appendSubnetsBelow
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
} else {
fullTopics = append(fullTopics, newTopic(top.start, top.end, entry.ForkDigest, top.message))
}
}
}
}
end := future
// We're iterating from high to low per the slices.Reverse above.
// So we'll update end = entry.Epoch as we go down, and use that as the end for the next entry.
// This loop either adds blob or data column sidecar topics depending on the fork.
for _, entry := range schedule {
if entry.Epoch < deneb {
break
// note: there is a special case where deneb is the genesis fork, in which case
// we'll generate blob sidecar topics for the earlier schedule, but
// this only happens in devnets where it doesn't really matter.
}
message := GossipDataColumnSidecarMessage
subnets := cfg.DataColumnSidecarSubnetCount
if entry.Epoch < fulu {
message = GossipBlobSidecarMessage
subnets = uint64(cfg.MaxBlobsPerBlockAtEpoch(entry.Epoch))
}
// Set subnet to the max value; appendSubnetsBelow will iterate every index up to that value.
top := newSubnetTopic(entry.Epoch, end, entry.ForkDigest, message, subnets)
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
end = entry.Epoch // These topics / subnet structures are mutually exclusive, so set each end to the next highest entry.
}
return fullTopics
}

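Every topic string built above has the same fixed shape: "/eth2/" plus the lowercase hex fork digest, the message name, an optional "_<subnet>" suffix, and the encoder's "/ssz_snappy" protocol suffix. A small standalone sketch that assembles one by hand; the digest bytes are illustrative and not tied to any real fork.

package main

import (
	"encoding/hex"
	"fmt"
	"strconv"
)

// buildTopic assembles a gossip topic string with the same shape newTopic/newSubnetTopic
// produce: /eth2/<digest>/<message>[_<subnet>]<encoder suffix>.
func buildTopic(digest [4]byte, message string, subnet uint64, hasSubnet bool) string {
	t := "/eth2/" + hex.EncodeToString(digest[:]) + "/" + message
	if hasSubnet {
		t += "_" + strconv.FormatUint(subnet, 10)
	}
	return t + "/ssz_snappy" // the SSZ-snappy encoder's protocol suffix
}

func main() {
	digest := [4]byte{0xad, 0x53, 0x2c, 0xeb} // illustrative digest bytes
	fmt.Println(buildTopic(digest, "beacon_block", 0, false))       // /eth2/ad532ceb/beacon_block/ssz_snappy
	fmt.Println(buildTopic(digest, "beacon_attestation", 19, true)) // /eth2/ad532ceb/beacon_attestation_19/ssz_snappy
}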

@@ -0,0 +1,70 @@
package p2p
import (
"encoding/hex"
"testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestAllTopics(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.MainnetConfig()
cfg.FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.OverrideBeaconConfig(cfg)
s := &Service{}
all := s.allTopicStrings()
tops := map[string]struct{}{}
for _, t := range all {
tops[t] = struct{}{}
}
require.Equal(t, len(tops), len(all), "duplicate topics found")
expected := []string{
"/eth2/ad532ceb/sync_committee_contribution_and_proof/ssz_snappy",
"/eth2/ad532ceb/beacon_aggregate_and_proof/ssz_snappy",
"/eth2/ad532ceb/beacon_block/ssz_snappy",
"/eth2/ad532ceb/bls_to_execution_change/ssz_snappy",
"/eth2/afcaaba0/beacon_attestation_19/ssz_snappy",
"/eth2/cc2c5cdb/data_column_sidecar_0/ssz_snappy",
"/eth2/cc2c5cdb/data_column_sidecar_127/ssz_snappy",
}
forks := []primitives.Epoch{cfg.GenesisEpoch, cfg.AltairForkEpoch,
cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch,
cfg.ElectraForkEpoch, cfg.FuluForkEpoch}
// sanity check: we should always have a block topic.
// construct it by hand in case there are bugs in newTopic.
for _, f := range forks {
digest := params.ForkDigest(f)
expected = append(expected, "/eth2/"+hex.EncodeToString(digest[:])+"/beacon_block/ssz_snappy")
}
for _, e := range expected {
_, ok := tops[e]
require.Equal(t, true, ok)
}
// we should have no data column subnets before fulu
electraColumn := newSubnetTopic(cfg.ElectraForkEpoch, cfg.FuluForkEpoch,
params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
GossipDataColumnSidecarMessage,
cfg.DataColumnSidecarSubnetCount-1)
// we should have no blob sidecars before deneb or after electra
blobBeforeDeneb := newSubnetTopic(cfg.DenebForkEpoch-1, cfg.DenebForkEpoch,
params.ForkDigest(cfg.DenebForkEpoch-1),
GossipBlobSidecarMessage,
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch-1))-1)
blobAfterElectra := newSubnetTopic(cfg.FuluForkEpoch, cfg.FarFutureEpoch,
params.ForkDigest(cfg.FuluForkEpoch),
GossipBlobSidecarMessage,
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch))-1)
unexpected := []string{
"/eth2/cc2c5cdb/data_column_sidecar_128/ssz_snappy",
electraColumn.String(),
blobBeforeDeneb.String(),
blobAfterElectra.String(),
}
for _, e := range unexpected {
_, ok := tops[e]
require.Equal(t, false, ok)
}
}


@@ -4876,8 +4876,16 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
}
func Test_validateBlobs(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fe := params.BeaconConfig().FuluForkEpoch
fs := util.SlotAtEpoch(t, fe)
require.NoError(t, kzg.Start())
denebMax := params.BeaconConfig().MaxBlobsPerBlock(ds)
blob := util.GetRandBlob(123)
// Generate proper commitment and proof for the blob
var kzgBlob kzg.Blob
@@ -4887,6 +4895,7 @@ func Test_validateBlobs(t *testing.T) {
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
require.NoError(t, err)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -4902,10 +4911,11 @@ func Test_validateBlobs(t *testing.T) {
require.NoError(t, err)
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
electraMax := params.BeaconConfig().MaxBlobsPerBlock(es)
blobs := [][]byte{}
commitments := [][]byte{}
proofs := [][]byte{}
for i := 0; i < 10; i++ {
for i := 0; i < electraMax+1; i++ {
blobs = append(blobs, blob[:])
commitments = append(commitments, commitment[:])
proofs = append(proofs, proof[:])
@@ -4923,6 +4933,7 @@ func Test_validateBlobs(t *testing.T) {
t.Run("Deneb block with valid single blob", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -4931,107 +4942,54 @@ func Test_validateBlobs(t *testing.T) {
})
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = commitments[:6]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with exactly 6 blobs
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
})
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = commitments[:7]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 7 blobs when max is 6
err = s.validateBlobs(b, blobs[:7], proofs[:7])
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
require.ErrorContains(t, "number of blobs over max", err)
})
t.Run("Electra block with valid blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot (epoch 5+)
blk.Block.Slot = es
blk.Block.Body.BlobKzgCommitments = commitments[:9]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with 9 blobs in Electra
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
})
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot
blk.Block.Body.BlobKzgCommitments = commitments[:10]
blk.Block.Slot = es
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 10 blobs when max is 9
err = s.validateBlobs(b, blobs[:10], proofs[:10])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
require.ErrorContains(t, "number of blobs over max", err)
})
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
params.OverrideBeaconConfig(testCfg)
// Create Fulu block with proper cell proofs
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
blk.Block.Slot = fs
// Generate valid commitments and cell proofs for testing
blobCount := 2
@@ -5075,18 +5033,8 @@ func Test_validateBlobs(t *testing.T) {
})
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.NumberOfColumns = 128
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
blk.Block.Slot = fs
// Create valid commitments but wrong number of cell proofs
blobCount := 2
@@ -5123,6 +5071,7 @@ func Test_validateBlobs(t *testing.T) {
require.NoError(t, err)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -5134,6 +5083,7 @@ func Test_validateBlobs(t *testing.T) {
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = [][]byte{}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -5148,53 +5098,48 @@ func Test_validateBlobs(t *testing.T) {
// Set up config with BlobSchedule (BPO - Blob Parameter Only forks)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.FuluForkEpoch = 200
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
// Define blob schedule with progressive increases
testCfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
{Epoch: fe + 1, MaxBlobsPerBlock: 3}, // Start with 3 blobs
{Epoch: fe + 10, MaxBlobsPerBlock: 5}, // Increase to 5 at fe + 10
{Epoch: fe + 20, MaxBlobsPerBlock: 7}, // Increase to 7 at fe + 20
{Epoch: fe + 30, MaxBlobsPerBlock: 9}, // Increase to 9 at fe + 30
}
params.OverrideBeaconConfig(testCfg)
s := &Server{}
// Test epoch 0-9: max 3 blobs
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
t.Run("deneb under and over max", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 5 // Epoch 0
blk.Block.Body.BlobKzgCommitments = commitments[:3]
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = commitments[:denebMax]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
// Should fail with 4 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:4]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:4], proofs[:4])
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
require.ErrorContains(t, "number of blobs over max", err)
})
// Test epoch 30+: max 9 blobs
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 960 // Epoch 30
blk.Block.Body.BlobKzgCommitments = commitments[:9]
t.Run("different max in electra", func(t *testing.T) {
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = es
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
// Should fail with 10 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:10]
// exceed the electra max
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:10], proofs[:10])
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
})
})

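The test above now drives blob limits from an epoch-keyed BPO schedule instead of per-fork constants. A minimal sketch of the lookup such a schedule implies, using local stand-in types (the real resolution lives in the params package; maxBlobsAtEpoch here is hypothetical):

package main

import "fmt"

// blobScheduleEntry mirrors the fields exercised by the test above; it is a
// local stand-in, not the params.BlobScheduleEntry type itself.
type blobScheduleEntry struct {
	Epoch            uint64
	MaxBlobsPerBlock int
}

// maxBlobsAtEpoch walks a schedule sorted by ascending activation epoch and
// returns the limit of the last entry at or before the requested epoch,
// falling back to the pre-schedule (deprecated per-fork) limit otherwise.
func maxBlobsAtEpoch(schedule []blobScheduleEntry, epoch uint64, fallback int) int {
	limit := fallback
	for _, e := range schedule {
		if e.Epoch > epoch {
			break
		}
		limit = e.MaxBlobsPerBlock
	}
	return limit
}

func main() {
	schedule := []blobScheduleEntry{{Epoch: 200, MaxBlobsPerBlock: 3}, {Epoch: 210, MaxBlobsPerBlock: 5}}
	fmt.Println(maxBlobsAtEpoch(schedule, 205, 9)) // prints 3
}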

@@ -36,11 +36,13 @@ import (
func TestBlobs(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
params.OverrideBeaconConfig(cfg)
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
ds := util.SlotAtEpoch(t, cfg.DenebForkEpoch)
db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es, 4)
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
bs := filesystem.NewEphemeralBlobStorage(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
@@ -170,7 +172,7 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("slot", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -194,7 +196,7 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("slot not found", func(t *testing.T) {
u := "http://foo.example/122"
u := fmt.Sprintf("http://foo.example/%d", es-1)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -211,7 +213,7 @@ func TestBlobs(t *testing.T) {
assert.Equal(t, http.StatusNotFound, writer.Code)
})
t.Run("one blob only", func(t *testing.T) {
u := "http://foo.example/123?indices=2"
u := fmt.Sprintf("http://foo.example/%d?indices=2", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -242,7 +244,7 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("no blobs returns an empty array", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -266,8 +268,8 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("blob index over max", func(t *testing.T) {
overLimit := maxBlobsPerBlockByVersion(version.Deneb)
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(ds)
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -281,7 +283,7 @@ func TestBlobs(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
})
t.Run("outside retention period returns 200 with what we have", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -305,13 +307,13 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es+128, 0)
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(commitments), 0)
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
u := "http://foo.example/333"
u := fmt.Sprintf("http://foo.example/%d", es+128)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -423,16 +425,17 @@ func TestBlobs(t *testing.T) {
func TestBlobs_Electra(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 0
cfg.ElectraForkEpoch = 1
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
cfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 6},
{Epoch: 1, MaxBlobsPerBlock: 9},
{Epoch: cfg.FuluForkEpoch + 4096, MaxBlobsPerBlock: 6},
{Epoch: cfg.FuluForkEpoch + 4096 + 128, MaxBlobsPerBlock: 9},
}
params.OverrideBeaconConfig(cfg)
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
db := testDB.SetupDB(t)
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, maxBlobsPerBlockByVersion(version.Electra))
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, es, overLimit)
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
bs := filesystem.NewEphemeralBlobStorage(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
@@ -450,7 +453,7 @@ func TestBlobs_Electra(t *testing.T) {
TimeFetcher: mockChainService,
}
t.Run("max blobs for electra", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -468,7 +471,7 @@ func TestBlobs_Electra(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
require.Equal(t, overLimit, len(resp.Data))
sidecar := resp.Data[0]
require.NotNil(t, sidecar)
assert.Equal(t, "0", sidecar.Index)
@@ -481,8 +484,8 @@ func TestBlobs_Electra(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("requested blob index at max", func(t *testing.T) {
limit := maxBlobsPerBlockByVersion(version.Electra) - 1
u := fmt.Sprintf("http://foo.example/123?indices=%d", limit)
limit := params.BeaconConfig().MaxBlobsPerBlock(es) - 1
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, limit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -513,8 +516,8 @@ func TestBlobs_Electra(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("blob index over max", func(t *testing.T) {
overLimit := maxBlobsPerBlockByVersion(version.Electra)
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -530,6 +533,7 @@ func TestBlobs_Electra(t *testing.T) {
}
func Test_parseIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
tests := []struct {
name string
query string
@@ -559,7 +563,7 @@ func Test_parseIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseIndices(&url.URL{RawQuery: tt.query}, 0)
got, err := parseIndices(&url.URL{RawQuery: tt.query}, ds)
if err != nil && tt.wantErr != "" {
require.StringContains(t, tt.wantErr, err.Error())
return
@@ -588,6 +592,7 @@ func TestGetBlobs(t *testing.T) {
{Epoch: 20, MaxBlobsPerBlock: 12}, // Fulu
}
params.OverrideBeaconConfig(cfg)
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
@@ -1009,7 +1014,8 @@ func TestGetBlobs(t *testing.T) {
// Test for Electra fork
t.Run("electra max blobs", func(t *testing.T) {
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, maxBlobsPerBlockByVersion(version.Electra))
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, overLimit)
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
electraBs := filesystem.NewEphemeralBlobStorage(t)
electraSidecars := verification.FakeVerifySliceForTest(t, electraBlobs)
@@ -1036,7 +1042,8 @@ func TestGetBlobs(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetBlobsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
require.Equal(t, overLimit, len(resp.Data))
blob := resp.Data[0]
require.NotNil(t, blob)
assert.Equal(t, hexutil.Encode(electraBlobs[0].Blob), blob)
@@ -1145,14 +1152,3 @@ func unmarshalBlobs(t *testing.T, response []byte) [][]byte {
}
return blobs
}
func maxBlobsPerBlockByVersion(v int) int {
if v >= version.Fulu {
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu
}
if v >= version.Electra {
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra
}
return params.BeaconConfig().DeprecatedMaxBlobsPerBlock
}

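These tests now derive request slots such as ds and es from the configured fork epochs through a SlotAtEpoch-style helper rather than hard-coding values like 123. A minimal sketch of the conversion that helper is assumed to perform (the first slot of an epoch, with an overflow guard for far-future sentinel epochs):

package main

import (
	"fmt"
	"math"
)

// firstSlotOfEpoch returns epoch * slotsPerEpoch, guarding against uint64
// overflow so a far-future sentinel epoch fails loudly instead of wrapping.
func firstSlotOfEpoch(epoch, slotsPerEpoch uint64) (uint64, error) {
	if slotsPerEpoch != 0 && epoch > math.MaxUint64/slotsPerEpoch {
		return 0, fmt.Errorf("epoch %d overflows slot arithmetic", epoch)
	}
	return epoch * slotsPerEpoch, nil
}

func main() {
	s, err := firstSlotOfEpoch(1, 32) // a fork at epoch 1 on a 32-slot-per-epoch config
	fmt.Println(s, err)               // prints 32 <nil>
}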

@@ -167,8 +167,8 @@ func prepareConfigSpec() (map[string]interface{}, error) {
for i := 0; i < t.NumField(); i++ {
tField := t.Field(i)
_, isSpec := tField.Tag.Lookup("spec")
if !isSpec {
specTag, isSpec := tField.Tag.Lookup("spec")
if !isSpec || specTag != "true" {
continue
}
if shouldSkip(tField) {

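The prepareConfigSpec change above tightens the filter: a config field is exported only when its spec tag is literally "true", so a field tagged spec:"false" is now excluded rather than included just because the tag exists. A self-contained sketch of the reflection check (the demo struct is hypothetical):

package main

import (
	"fmt"
	"reflect"
)

// demoConfig stands in for the beacon config struct; the tag convention is the
// point here, not the real field set.
type demoConfig struct {
	MaxBlobsPerBlock uint64 `spec:"true"`
	InternalOnly     uint64 `spec:"false"`
	Untagged         uint64
}

// specFieldNames keeps only fields whose spec tag is exactly "true".
func specFieldNames(v interface{}) []string {
	t := reflect.TypeOf(v)
	var names []string
	for i := 0; i < t.NumField(); i++ {
		specTag, ok := t.Field(i).Tag.Lookup("spec")
		if !ok || specTag != "true" { // tag presence alone is no longer enough
			continue
		}
		names = append(names, t.Field(i).Name)
	}
	return names
}

func main() {
	fmt.Println(specFieldNames(demoConfig{})) // prints [MaxBlobsPerBlock]
}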

@@ -145,7 +145,6 @@ func TestGetSpec(t *testing.T) {
config.PendingDepositsLimit = 82
config.MaxPendingPartialsPerWithdrawalsSweep = 83
config.PendingConsolidationsLimit = 84
config.MaxPartialWithdrawalsPerPayload = 85
config.FullExitRequestAmount = 86
config.MaxConsolidationsRequestsPerPayload = 87
config.MaxAttesterSlashingsElectra = 88
@@ -164,6 +163,7 @@ func TestGetSpec(t *testing.T) {
config.KzgCommitmentInclusionProofDepth = 101
config.BlobsidecarSubnetCount = 102
config.BlobsidecarSubnetCountElectra = 103
config.SyncMessageDueBPS = 104
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -201,8 +201,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 176, len(data))
assert.Equal(t, 171, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -240,8 +239,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "14", v)
case "RANDOM_SUBNETS_PER_VALIDATOR":
assert.Equal(t, "15", v)
case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
assert.Equal(t, "16", v)
case "SECONDS_PER_ETH1_BLOCK":
assert.Equal(t, "17", v)
case "DEPOSIT_CHAIN_ID":
@@ -438,8 +435,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "16777216", v)
case "PROPOSER_SCORE_BOOST":
assert.Equal(t, "40", v)
case "INTERVALS_PER_SLOT":
assert.Equal(t, "3", v)
case "MAX_WITHDRAWALS_PER_PAYLOAD":
assert.Equal(t, "74", v)
case "MAX_BLS_TO_EXECUTION_CHANGES":
@@ -456,9 +451,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "8", v)
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
assert.Equal(t, "128", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
case "NODE_ID_BITS":
assert.Equal(t, "256", v)
case "ATTESTATION_SUBNET_EXTRA_BITS":
assert.Equal(t, "0", v)
case "ATTESTATION_SUBNET_PREFIX_BITS":
@@ -521,8 +513,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "83", v)
case "PENDING_CONSOLIDATIONS_LIMIT":
assert.Equal(t, "84", v)
case "MAX_PARTIAL_WITHDRAWALS_PER_PAYLOAD":
assert.Equal(t, "85", v)
case "FULL_EXIT_REQUEST_AMOUNT":
assert.Equal(t, "86", v)
case "MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD":
@@ -541,8 +531,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "93", v)
case "MAX_PENDING_DEPOSITS_PER_EPOCH":
assert.Equal(t, "94", v)
case "TARGET_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "6", v)
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "9", v)
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
@@ -573,12 +561,12 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "100", v)
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "101", v)
case "MAX_BLOBS_PER_BLOCK_FULU":
assert.Equal(t, "12", v)
case "BLOB_SIDECAR_SUBNET_COUNT":
assert.Equal(t, "102", v)
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
assert.Equal(t, "103", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "104", v)
case "BLOB_SCHEDULE":
// BLOB_SCHEDULE should be an empty slice when no schedule is defined
blobSchedule, ok := v.([]interface{})


@@ -68,7 +68,11 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
Code: http.StatusInternalServerError,
}
}
exitInfo := validators.ExitInformation(st)
var exitInfo *validators.ExitInfo
if len(blk.Body().ProposerSlashings()) > 0 || len(blk.Body().AttesterSlashings()) > 0 {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = validators.ExitInformation(st)
}
st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, &httputil.DefaultJsonError{

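The reward service now computes exit information only when the block actually contains proposer or attester slashings, since validators.ExitInformation walks the full validator registry. A generic sketch of that lazy-compute guard (a hypothetical helper, not a Prysm API); the same guard appears again in getSlashings further down:

package lazy

// IfNeeded runs the expensive compute function only when needed is true;
// otherwise it returns nil and the caller must tolerate the nil result.
func IfNeeded[T any](needed bool, compute func() *T) *T {
	if !needed {
		return nil
	}
	return compute()
}

With such a helper the call above would read roughly exitInfo := lazy.IfNeeded(len(blk.Body().ProposerSlashings()) > 0 || len(blk.Body().AttesterSlashings()) > 0, func() *validators.ExitInfo { return validators.ExitInformation(st) }), assuming the downstream slashing processing accepts a nil exit info when there is nothing to process.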

@@ -3,7 +3,6 @@ package lookup
import (
"context"
"fmt"
"math"
"strconv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
@@ -284,14 +283,9 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.
return make([]*blocks.VerifiedROBlob, 0), nil
}
// Compute the first Fulu slot.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
fuluForkSlot := primitives.Slot(math.MaxUint64)
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
fuluForkSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
// Convert versioned hashes to indices if provided


@@ -190,7 +190,7 @@ func TestBlobsErrorHandling(t *testing.T) {
t.Run("non-existent block by slot returns 404", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{},
}
@@ -275,39 +275,19 @@ func TestBlobsErrorHandling(t *testing.T) {
}
func TestGetBlob(t *testing.T) {
const (
slot = 123
blobCount = 4
denebForEpoch = 1
fuluForkEpoch = 2
)
setupDeneb := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForEpoch
params.OverrideBeaconConfig(cfg)
}
setupFulu := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForEpoch
cfg.FuluForkEpoch = fuluForkEpoch
params.OverrideBeaconConfig(cfg)
}
const blobCount = 4
ctx := t.Context()
db := testDB.SetupDB(t)
params.SetupTestConfigCleanup(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().DenebForkEpoch + 4096*2
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
db := testDB.SetupDB(t)
require.NoError(t, kzg.Start())
// Create and save Deneb block and blob sidecars.
_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, ds, blobCount, util.WithDenebSlot(ds))
denebBlockRoot := denebBlock.Root()
verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
@@ -316,13 +296,14 @@ func TestGetBlob(t *testing.T) {
require.NoError(t, err)
}
err = db.SaveBlock(t.Context(), denebBlock)
err := db.SaveBlock(t.Context(), denebBlock)
require.NoError(t, err)
// Create Electra block and blob sidecars. (Electra block = Fulu block),
// save the block, convert blob sidecars to data column sidecars and save the block.
fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
dsStr := fmt.Sprintf("%d", ds)
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, blobCount)
fuluBlockRoot := fuluBlock.Root()
cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
@@ -347,8 +328,6 @@ func TestGetBlob(t *testing.T) {
require.NoError(t, err)
t.Run("genesis", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{}
_, rpcErr := blocker.Blobs(ctx, "genesis")
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
@@ -356,8 +335,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("head", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{
Root: denebBlockRoot[:],
@@ -388,8 +365,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("finalized", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -405,8 +380,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("justified", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -422,8 +395,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("root", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -438,8 +409,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("slot", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -449,7 +418,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedBlobs))
})
@@ -457,8 +426,6 @@ func TestGetBlob(t *testing.T) {
t.Run("one blob only", func(t *testing.T) {
const index = 2
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -468,7 +435,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{index}))
require.IsNil(t, rpcErr)
require.Equal(t, 1, len(retrievedVerifiedSidecars))
@@ -483,8 +450,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("no blobs returns an empty array", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -494,14 +459,12 @@ func TestGetBlob(t *testing.T) {
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(verifiedBlobs))
})
t.Run("no blob at index", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -512,14 +475,12 @@ func TestGetBlob(t *testing.T) {
}
noBlobIndex := len(storedBlobSidecars) + 1
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, noBlobIndex}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, noBlobIndex}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})
t.Run("index too big", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -528,14 +489,12 @@ func TestGetBlob(t *testing.T) {
BeaconDB: db,
BlobStorage: blobStorage,
}
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, math.MaxInt}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, math.MaxInt}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
})
t.Run("not enough stored data column sidecars", func(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
require.NoError(t, err)
@@ -555,8 +514,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("reconstruction needed", func(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
require.NoError(t, err)
@@ -582,8 +539,6 @@ func TestGetBlob(t *testing.T) {
})
t.Run("no reconstruction needed", func(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)


@@ -17,13 +17,6 @@ import (
"google.golang.org/grpc/status"
)
const (
// validatorLookupThreshold determines when to use full assignment map vs cached linear search.
// For requests with fewer validators, we use cached linear search to avoid the overhead
// of building a complete assignment map for all validators in the epoch.
validatorLookupThreshold = 3000
)
// GetDutiesV2 returns the duties assigned to a list of validators specified
// in the request object.
//
@@ -60,7 +53,26 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
span.SetAttributes(trace.Int64Attribute("num_pubkeys", int64(len(req.PublicKeys))))
defer span.End()
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, len(req.PublicKeys))
// Collect validator indices from public keys and cache the lookups
type validatorInfo struct {
index primitives.ValidatorIndex
found bool
}
validatorLookup := make(map[string]validatorInfo, len(req.PublicKeys))
requestIndices := make([]primitives.ValidatorIndex, 0, len(req.PublicKeys))
for _, pubKey := range req.PublicKeys {
key := string(pubKey)
if _, exists := validatorLookup[key]; !exists {
idx, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
validatorLookup[key] = validatorInfo{index: idx, found: ok}
if ok {
requestIndices = append(requestIndices, idx)
}
}
}
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, requestIndices)
if err != nil {
return nil, err
}
@@ -68,14 +80,14 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
validatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
nextValidatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
// start loop for assignments for current and next epochs
// Build duties using cached validator index lookups
for _, pubKey := range req.PublicKeys {
if ctx.Err() != nil {
return nil, status.Errorf(codes.Aborted, "Could not continue fetching assignments: %v", ctx.Err())
}
validatorIndex, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
info := validatorLookup[string(pubKey)]
if !info.found {
unknownDuty := &ethpb.DutiesV2Response_Duty{
PublicKey: pubKey,
Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
@@ -85,16 +97,15 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
continue
}
meta.current.liteAssignment = vs.getValidatorAssignment(meta.current, validatorIndex)
currentAssignment := vs.getValidatorAssignment(meta.current, info.index)
nextAssignment := vs.getValidatorAssignment(meta.next, info.index)
meta.next.liteAssignment = vs.getValidatorAssignment(meta.next, validatorIndex)
assignment, nextAssignment, err := vs.buildValidatorDuty(pubKey, validatorIndex, s, req.Epoch, meta)
assignment, nextDuty, err := vs.buildValidatorDuty(pubKey, info.index, s, req.Epoch, meta, currentAssignment, nextAssignment)
if err != nil {
return nil, err
}
validatorAssignments = append(validatorAssignments, assignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextDuty)
}
// Dependent roots for fork choice
@@ -147,18 +158,15 @@ type dutiesMetadata struct {
}
type metadata struct {
committeesAtSlot uint64
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
startSlot primitives.Slot
committeesBySlot [][][]primitives.ValidatorIndex
validatorAssignmentMap map[primitives.ValidatorIndex]*helpers.LiteAssignment
liteAssignment *helpers.LiteAssignment
committeesAtSlot uint64
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
committeeAssignments map[primitives.ValidatorIndex]*helpers.CommitteeAssignment
}
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*dutiesMetadata, error) {
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*dutiesMetadata, error) {
meta := &dutiesMetadata{}
var err error
meta.current, err = loadMetadata(ctx, s, reqEpoch, numValidators)
meta.current, err = loadMetadata(ctx, s, reqEpoch, requestIndices)
if err != nil {
return nil, err
}
@@ -168,14 +176,14 @@ func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primi
return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
}
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, numValidators)
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, requestIndices)
if err != nil {
return nil, err
}
return meta, nil
}
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*metadata, error) {
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*metadata, error) {
meta := &metadata{}
if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {
@@ -188,56 +196,36 @@ func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.
}
meta.committeesAtSlot = helpers.SlotCommitteeCount(activeValidatorCount)
meta.startSlot, err = slots.EpochStart(reqEpoch)
// Use CommitteeAssignments which only computes committees for requested validators
meta.committeeAssignments, err = helpers.CommitteeAssignments(ctx, s, reqEpoch, requestIndices)
if err != nil {
return nil, err
}
meta.committeesBySlot, err = helpers.PrecomputeCommittees(ctx, s, meta.startSlot)
if err != nil {
return nil, err
}
if numValidators >= validatorLookupThreshold {
meta.validatorAssignmentMap = buildValidatorAssignmentMap(meta.committeesBySlot, meta.startSlot)
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
return meta, nil
}
// buildValidatorAssignmentMap creates a map from validator index to assignment for O(1) lookup.
func buildValidatorAssignmentMap(
bySlot [][][]primitives.ValidatorIndex,
startSlot primitives.Slot,
) map[primitives.ValidatorIndex]*helpers.LiteAssignment {
validatorToAssignment := make(map[primitives.ValidatorIndex]*helpers.LiteAssignment)
for relativeSlot, committees := range bySlot {
for cIdx, committee := range committees {
for pos, vIdx := range committee {
validatorToAssignment[vIdx] = &helpers.LiteAssignment{
AttesterSlot: startSlot + primitives.Slot(relativeSlot),
CommitteeIndex: primitives.CommitteeIndex(cIdx),
CommitteeLength: uint64(len(committee)),
ValidatorCommitteeIndex: uint64(pos),
}
}
// findValidatorIndexInCommittee finds the position of a validator in a committee.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
for i, vIdx := range committee {
if vIdx == validatorIndex {
return uint64(i)
}
}
return validatorToAssignment
return 0
}
// getValidatorAssignment retrieves the assignment for a validator using either
// the pre-built assignment map (for large requests) or linear search (for small requests).
// getValidatorAssignment retrieves the assignment for a validator from CommitteeAssignments.
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
if meta.validatorAssignmentMap != nil {
if assignment, exists := meta.validatorAssignmentMap[validatorIndex]; exists {
return assignment
if assignment, exists := meta.committeeAssignments[validatorIndex]; exists {
return &helpers.LiteAssignment{
AttesterSlot: assignment.AttesterSlot,
CommitteeIndex: assignment.CommitteeIndex,
CommitteeLength: uint64(len(assignment.Committee)),
ValidatorCommitteeIndex: findValidatorIndexInCommittee(assignment.Committee, validatorIndex),
}
return &helpers.LiteAssignment{}
}
return helpers.AssignmentForValidator(meta.committeesBySlot, meta.startSlot, validatorIndex)
return &helpers.LiteAssignment{}
}
// buildValidatorDuty builds both current-epoch and next-epoch V2 duty objects
@@ -248,21 +236,23 @@ func (vs *Server) buildValidatorDuty(
s state.BeaconState,
reqEpoch primitives.Epoch,
meta *dutiesMetadata,
currentAssignment *helpers.LiteAssignment,
nextAssignment *helpers.LiteAssignment,
) (*ethpb.DutiesV2Response_Duty, *ethpb.DutiesV2Response_Duty, error) {
assignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
nextAssignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
nextDuty := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
statusEnum := assignmentStatus(s, idx)
assignment.ValidatorIndex = idx
assignment.Status = statusEnum
assignment.CommitteesAtSlot = meta.current.committeesAtSlot
assignment.ProposerSlots = meta.current.proposalSlots[idx]
populateCommitteeFields(assignment, meta.current.liteAssignment)
populateCommitteeFields(assignment, currentAssignment)
nextAssignment.ValidatorIndex = idx
nextAssignment.Status = statusEnum
nextAssignment.CommitteesAtSlot = meta.next.committeesAtSlot
populateCommitteeFields(nextAssignment, meta.next.liteAssignment)
nextDuty.ValidatorIndex = idx
nextDuty.Status = statusEnum
nextDuty.CommitteesAtSlot = meta.next.committeesAtSlot
populateCommitteeFields(nextDuty, nextAssignment)
// Sync committee flags
if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
@@ -271,7 +261,7 @@ func (vs *Server) buildValidatorDuty(
return nil, nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
}
assignment.IsSyncCommittee = inSync
nextAssignment.IsSyncCommittee = inSync
nextDuty.IsSyncCommittee = inSync
if inSync {
if err := core.RegisterSyncSubnetCurrentPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
@@ -290,18 +280,16 @@ func (vs *Server) buildValidatorDuty(
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
}
nextAssignment.IsSyncCommittee = nextInSync
nextDuty.IsSyncCommittee = nextInSync
if nextInSync {
go func() {
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
}()
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
}
}
}
return assignment, nextAssignment, nil
return assignment, nextDuty, nil
}
func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {

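The duties handler now resolves each requested public key to a validator index exactly once, caches the outcome (including misses), and derives a validator's offset inside its committee with a linear scan. A compact sketch of both pieces, with local stand-in types and resolve standing in for ValidatorIndexByPubkey:

package duties

type validatorIndex uint64

type lookupResult struct {
	index validatorIndex
	found bool
}

// dedupeLookups resolves each pubkey once and caches the outcome, so duplicate
// keys in the request never trigger a second resolution.
func dedupeLookups(pubkeys [][]byte, resolve func([]byte) (validatorIndex, bool)) map[string]lookupResult {
	cache := make(map[string]lookupResult, len(pubkeys))
	for _, pk := range pubkeys {
		key := string(pk)
		if _, seen := cache[key]; seen {
			continue
		}
		idx, ok := resolve(pk)
		cache[key] = lookupResult{index: idx, found: ok}
	}
	return cache
}

// positionInCommittee mirrors findValidatorIndexInCommittee above: the offset
// of the validator within its committee, or 0 when it is absent.
func positionInCommittee(committee []validatorIndex, v validatorIndex) uint64 {
	for i, idx := range committee {
		if idx == v {
			return uint64(i)
		}
	}
	return 0
}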

@@ -560,105 +560,20 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
assert.ErrorContains(t, "Syncing to latest head", err)
}
func TestBuildValidatorAssignmentMap(t *testing.T) {
start := primitives.Slot(200)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}}, // slot 200, committee 0
{{7, 8, 9}}, // slot 201, committee 0
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
}
assignmentMap := buildValidatorAssignmentMap(bySlot, start)
// Test validator 8 assignment (slot 201, committee 0, position 1)
vIdx := primitives.ValidatorIndex(8)
got, exists := assignmentMap[vIdx]
assert.Equal(t, true, exists)
require.NotNil(t, got)
assert.Equal(t, start+1, got.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got.CommitteeIndex)
assert.Equal(t, uint64(3), got.CommitteeLength)
assert.Equal(t, uint64(1), got.ValidatorCommitteeIndex)
// Test validator 1 assignment (slot 200, committee 0, position 0)
vIdx1 := primitives.ValidatorIndex(1)
got1, exists1 := assignmentMap[vIdx1]
assert.Equal(t, true, exists1)
require.NotNil(t, got1)
assert.Equal(t, start, got1.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got1.CommitteeIndex)
assert.Equal(t, uint64(3), got1.CommitteeLength)
assert.Equal(t, uint64(0), got1.ValidatorCommitteeIndex)
// Test validator 10 assignment (slot 202, committee 1, position 0)
vIdx10 := primitives.ValidatorIndex(10)
got10, exists10 := assignmentMap[vIdx10]
assert.Equal(t, true, exists10)
require.NotNil(t, got10)
assert.Equal(t, start+2, got10.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(1), got10.CommitteeIndex)
assert.Equal(t, uint64(2), got10.CommitteeLength)
assert.Equal(t, uint64(0), got10.ValidatorCommitteeIndex)
// Test non-existent validator
_, exists99 := assignmentMap[primitives.ValidatorIndex(99)]
assert.Equal(t, false, exists99)
// Verify that we get the same results as the linear search
for _, committees := range bySlot {
for _, committee := range committees {
for _, validatorIdx := range committee {
linearResult := helpers.AssignmentForValidator(bySlot, start, validatorIdx)
mapResult, mapExists := assignmentMap[validatorIdx]
assert.Equal(t, true, mapExists)
require.DeepEqual(t, linearResult, mapResult)
}
}
}
}
func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
func TestGetValidatorAssignment(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
// Test using CommitteeAssignments
committeeAssignments := map[primitives.ValidatorIndex]*helpers.CommitteeAssignment{
5: {
Committee: []primitives.ValidatorIndex{4, 5, 6},
AttesterSlot: start + 1,
CommitteeIndex: primitives.CommitteeIndex(0),
},
}
// Test with pre-built assignment map (large request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: buildValidatorAssignmentMap(bySlot, start),
}
vs := &Server{}
// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
require.NotNil(t, assignment)
assert.Equal(t, start, assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)
// Test non-existent validator should return empty assignment
assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
require.NotNil(t, assignment)
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}
func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
}
// Test without assignment map (small request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: nil, // No map - should use linear search
committeeAssignments: committeeAssignments,
}
vs := &Server{}
@@ -676,53 +591,3 @@ func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}
func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
state, _ := util.DeterministicGenesisState(t, 128)
epoch := primitives.Epoch(0)
tests := []struct {
name string
numValidators int
expectAssignmentMap bool
}{
{
name: "Small request - below threshold",
numValidators: 100,
expectAssignmentMap: false,
},
{
name: "Large request - at threshold",
numValidators: validatorLookupThreshold,
expectAssignmentMap: true,
},
{
name: "Large request - above threshold",
numValidators: validatorLookupThreshold + 1000,
expectAssignmentMap: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
meta, err := loadMetadata(t.Context(), state, epoch, tt.numValidators)
require.NoError(t, err)
require.NotNil(t, meta)
if tt.expectAssignmentMap {
require.NotNil(t, meta.validatorAssignmentMap, "Expected assignment map to be built for large requests")
assert.Equal(t, true, len(meta.validatorAssignmentMap) > 0, "Assignment map should not be empty")
} else {
// For small requests, the map should be nil (not initialized)
if meta.validatorAssignmentMap != nil {
t.Errorf("Expected no assignment map for small requests, got: %v", meta.validatorAssignmentMap)
}
}
// Common fields should always be set
assert.Equal(t, true, meta.committeesAtSlot > 0)
require.NotNil(t, meta.committeesBySlot)
assert.Equal(t, true, len(meta.committeesBySlot) > 0)
})
}
}


@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -143,7 +142,7 @@ func (vs *Server) deposits(
if err != nil {
return nil, errors.Wrap(err, "could not retrieve requests start index")
}
eth1DepositIndexLimit := math.Min(canonicalEth1Data.DepositCount, requestsStartIndex)
eth1DepositIndexLimit := min(canonicalEth1Data.DepositCount, requestsStartIndex)
if beaconState.Eth1DepositIndex() < eth1DepositIndexLimit {
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < eth1DepositIndexLimit {
pendingDeps = append(pendingDeps, dep)


@@ -12,13 +12,18 @@ import (
func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*ethpb.ProposerSlashing, []ethpb.AttSlashing) {
var err error
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
if len(proposerSlashings) == 0 && len(attSlashings) == 0 {
return validProposerSlashings, validAttSlashings
}
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo := v.ExitInformation(head)
if err := helpers.UpdateTotalActiveBalanceCache(head, exitInfo.TotalActiveBalance); err != nil {
log.WithError(err).Warn("Could not update total active balance cache")
}
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
for _, slashing := range proposerSlashings {
_, err = blocks.ProcessProposerSlashing(ctx, head, slashing, exitInfo)
if err != nil {
@@ -27,8 +32,6 @@ func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*
}
validProposerSlashings = append(validProposerSlashings, slashing)
}
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
for _, slashing := range attSlashings {
_, err = blocks.ProcessAttesterSlashing(ctx, head, slashing, exitInfo)
if err != nil {


@@ -74,6 +74,18 @@ func WithTimeAsNow(t time.Time) ClockOpt {
}
}
func WithSlotAsNow(s types.Slot) ClockOpt {
return func(g *Clock) {
g.now = func() time.Time {
t, err := slots.StartTime(g.t, s)
if err != nil {
panic(err) // lint:nopanic -- This is a programming error if genesis/slot are invalid.
}
return t
}
}
}
// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.

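WithSlotAsNow pins the clock's notion of now to the start time of a given slot, which is what lets the sync tests further down position themselves relative to fork epochs rather than hard-coded slots. A usage sketch assuming the constructor shown above and the genesis/util test helpers referenced in the later diffs (imports elided):

// clockAtDeneb builds a test clock whose current slot is the first slot of the
// Deneb fork epoch; it mirrors the blobsTestCase setup below.
func clockAtDeneb(t *testing.T) *startup.Clock {
	ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
	return startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(ds))
}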

@@ -160,7 +160,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
}
validatorsLen := b.validatorsLen()
bound := mathutil.Min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
for i := uint64(0); i < bound; i++ {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {


@@ -3,7 +3,6 @@ package stategen
import (
"context"
"fmt"
"math"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -54,7 +53,7 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
defer span.End()
// Duration can't be 0 to prevent panic for division.
duration := uint64(math.Max(float64(s.saveHotStateDB.duration), 1))
duration := uint64(max(float64(s.saveHotStateDB.duration), 1))
s.saveHotStateDB.lock.Lock()
if s.saveHotStateDB.enabled && st.Slot().Mod(duration) == 0 {

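Several call sites in these files swap helper functions (math.Min, mathutil.Min, math.Max) for Go 1.21's built-in generic min and max. A self-contained illustration of the semantics those sites rely on; note the stategen change keeps its original float64 round-trip, while a plain uint64 form also works:

package main

import "fmt"

func main() {
	// The Go 1.21+ builtins are generic over ordered types, so no helper
	// package and no float64 conversion are required for integer bounds.
	bound := min(uint64(7), uint64(16))
	fmt.Println(bound) // prints 7

	// Guarding a divisor against zero, as in the saveStateByRoot change above.
	var configured uint64 // zero when unset
	duration := max(configured, 1)
	fmt.Println(duration) // prints 1
}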

@@ -17,6 +17,7 @@ go_library(
"fuzz_exports.go", # keep
"log.go",
"metrics.go",
"once.go",
"options.go",
"pending_attestations_queue.go",
"pending_blocks_queue.go",
@@ -45,7 +46,6 @@ go_library(
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_handlers.go",
"subscriber_light_client.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
"subscription_topic_handler.go",
@@ -113,7 +113,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",
@@ -174,6 +173,8 @@ go_test(
"error_test.go",
"fork_watcher_test.go",
"kzg_batch_verifier_test.go",
"once_test.go",
"pending_attestations_queue_bucket_test.go",
"pending_attestations_queue_test.go",
"pending_blocks_queue_test.go",
"rate_limiter_test.go",


@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -24,8 +25,9 @@ func testBlobGen(t *testing.T, start primitives.Slot, n int) ([]blocks.ROBlock,
}
func TestValidateNext_happy(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 4)
startSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := startSlot + 65
blks, blobs := testBlobGen(t, startSlot, 4)
cfg := &blobSyncConfig{
retentionStart: 0,
nbv: testNewBlobVerifier(),
@@ -74,8 +76,9 @@ func TestValidateNext_sigMatch(t *testing.T) {
}
func TestValidateNext_errorsFromVerifier(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 1)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := primitives.Slot(ds + 96)
blks, blobs := testBlobGen(t, ds+31, 1)
cases := []struct {
name string
err error


@@ -2,6 +2,7 @@ package sync
import (
"encoding/binary"
"io"
"math"
"math/big"
"testing"
@@ -18,9 +19,11 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -165,23 +168,12 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
require.Equal(t, rob.Index, r.sidecar.Index)
}
func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func()) {
cfg := params.BeaconConfig()
copiedCfg := cfg.Copy()
repositionFutureEpochs(copiedCfg)
copiedCfg.InitializeForkSchedule()
params.OverrideBeaconConfig(copiedCfg)
cleanup := func() {
params.OverrideBeaconConfig(cfg)
}
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
chain, clock := defaultMockChain(t)
func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob) {
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch))
chain := defaultMockChain(t, c.clock.CurrentEpoch())
if c.chain == nil {
c.chain = chain
}
if c.clock == nil {
c.clock = clock
}
d := db.SetupDB(t)
sidecars := make([]blocks.ROBlob, 0)
@@ -208,16 +200,16 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
client := p2ptest.NewTestP2P(t)
s := &Service{
cfg: &config{p2p: client, chain: c.chain, clock: clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
cfg: &config{p2p: client, chain: c.chain, clock: c.clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
rateLimiter: newRateLimiter(client),
}
byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV1, leakybucket.NewCollector(0.000001, int64(byRootRate), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV1, leakybucket.NewCollector(0.000001, int64(byRangeRate), time.Second, false))
return s, sidecars, cleanup
return s, sidecars
}
func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChunk) func(network.Stream) {
@@ -225,12 +217,16 @@ func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChu
for _, ex := range expect {
ex.requireExpected(t, s, stream)
}
encoding := s.cfg.p2p.Encoding()
_, _, err := ReadStatusCode(stream, encoding)
require.ErrorIs(t, err, io.EOF)
}
}
func (c *blobsTestCase) run(t *testing.T) {
s, sidecars, cleanup := c.setup(t)
defer cleanup()
blobRpcThrottleInterval = time.Microsecond * 1
s, sidecars := c.setup(t)
req := c.requestFromSidecars(sidecars)
expect := c.defineExpected(t, sidecars, req)
m := map[types.Slot][]blocks.ROBlob{}
@@ -266,41 +262,32 @@ func (c *blobsTestCase) run(t *testing.T) {
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
if cfg.FuluForkEpoch == math.MaxUint64 {
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
}
}
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
de := params.BeaconConfig().DenebForkEpoch
df, err := params.Fork(de)
func defaultMockChain(t *testing.T, current primitives.Epoch) *mock.ChainService {
fe := current - 2
df, err := params.Fork(current)
require.NoError(t, err)
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
fe := ce - 2
cs, err := slots.EpochStart(ce)
require.NoError(t, err)
genesis := time.Now()
mockNow := startup.MockNower{}
clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
mockNow.SetSlot(t, clock, cs)
chain := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
Fork: df,
}
return chain, clock
return chain
}
func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
ctx := t.Context()
nblocks := 10
c := &blobsTestCase{nblocks: nblocks}
c := &blobsTestCase{nblocks: nblocks, clock: startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(ds))}
c.oldestSlot = c.defaultOldestSlotByRoot
s, sidecars, cleanup := c.setup(t)
s, sidecars := c.setup(t)
req := blobRootRequestFromSidecars(sidecars)
expect := c.filterExpectedByRoot(t, sidecars, req)
defer cleanup()
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlock(0)
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
require.Equal(t, maxed, len(sidecars))
require.Equal(t, maxed, len(expect))
for _, sc := range sidecars {


@@ -1,6 +1,7 @@
package sync
import (
"context"
"strings"
"time"
@@ -29,13 +30,13 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
const minimumPeerCount = 1
// Get our actual custody group count.
actualCustodyGrounpCount, err := s.cfg.p2p.CustodyGroupCount()
actualCustodyGrounpCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "p2p custody group count")
}
// Get our target custody group count.
targetCustodyGroupCount, err := s.custodyGroupCount()
targetCustodyGroupCount, err := s.custodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}
@@ -88,7 +89,7 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
// custodyGroupCount computes the custody group count based on the custody requirement,
// the validators custody requirement, and whether the node is subscribed to all data subnets.
func (s *Service) custodyGroupCount() (uint64, error) {
func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
beaconConfig := params.BeaconConfig()
if flags.Get().SubscribeAllDataSubnets {

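custodyGroupCount and the p2p custody accessors now take a context.Context even though this particular implementation ignores it (hence the unnamed parameter); the plausible motivation is keeping the method signature compatible with context-accepting callers and interfaces. A small self-contained illustration of that pattern; the interface below is a stand-in, not the real p2p interface:

package main

import (
	"context"
	"fmt"
)

// counter stands in for an interface whose callers always pass a context,
// even when a given implementation has no use for it.
type counter interface {
	CustodyGroupCount(ctx context.Context) (uint64, error)
}

type staticCounter struct{ n uint64 }

// The unnamed context parameter mirrors custodyGroupCount(context.Context)
// above: the signature conforms, the value is simply unused.
func (s staticCounter) CustodyGroupCount(context.Context) (uint64, error) { return s.n, nil }

func main() {
	var c counter = staticCounter{n: 4}
	n, _ := c.CustodyGroupCount(context.Background())
	fmt.Println(n) // prints 4
}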

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
@@ -55,9 +54,9 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
if withChain {
const headSlot = primitives.Slot(100)
block, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Body: &eth.BeaconBlockBody{},
block, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{},
Slot: headSlot,
},
})
@@ -90,11 +89,13 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
}
func (ts *testSetup) assertCustodyInfo(t *testing.T, expectedSlot primitives.Slot, expectedCount uint64) {
p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot()
ctx := t.Context()
p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot(ctx)
require.NoError(t, err)
require.Equal(t, expectedSlot, p2pEarliestSlot)
p2pCustodyCount, err := ts.p2pService.CustodyGroupCount()
p2pCustodyCount, err := ts.p2pService.CustodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, expectedCount, p2pCustodyCount)
@@ -170,13 +171,15 @@ func TestCustodyGroupCount(t *testing.T) {
config.CustodyRequirement = 3
params.OverrideBeaconConfig(config)
ctx := t.Context()
t.Run("SubscribeAllDataSubnets enabled returns NumberOfCustodyGroups", func(t *testing.T) {
withSubscribeAllDataSubnets(t, func() {
service := &Service{
ctx: context.Background(),
}
result, err := service.custodyGroupCount()
result, err := service.custodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, config.NumberOfCustodyGroups, result)
})
@@ -188,7 +191,7 @@ func TestCustodyGroupCount(t *testing.T) {
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}
result, err := service.custodyGroupCount()
result, err := service.custodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, config.CustodyRequirement, result)
})


@@ -20,7 +20,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
goPeer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
@@ -921,7 +920,7 @@ func buildByRangeRequests(
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
for root, indices := range indicesByRoot {
identifier := &eth.DataColumnsByRootIdentifier{
identifier := &ethpb.DataColumnsByRootIdentifier{
BlockRoot: root[:],
Columns: helpers.SortedSliceFromMap(indices),
}
@@ -929,7 +928,7 @@ func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint6
}
// Sort identifiers to have a deterministic output.
slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
slices.SortFunc(identifiers, func(left, right *ethpb.DataColumnsByRootIdentifier) int {
if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
return cmp
}
@@ -1023,17 +1022,20 @@ func computeIndicesByRootByPeer(
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
for peer := range peers {
log := log.WithField("peerID", peer)
// Computes the custody columns for each peer
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
if err != nil {
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
log.WithError(err).Debug("Failed to convert peer ID to node ID")
continue
}
custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
log.WithError(err).Debug("Failed to get peer DAS info")
continue
}
for column := range dasInfo.CustodyColumns {
@@ -1046,11 +1048,13 @@ func computeIndicesByRootByPeer(
// Compute the head slot for each peer
peerChainState, err := p2p.Peers().ChainState(peer)
if err != nil {
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
log.WithError(err).Debug("Failed to get peer chain state")
continue
}
if peerChainState == nil {
return nil, errors.Errorf("chain state is nil for peer %s", peer)
log.Debug("Peer chain state is nil")
continue
}
// Our view of the head slot of a peer is not updated in real time.

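computeIndicesByRootByPeer (and, per the changelog entry, findPeersWithSubnets) now logs and skips a peer that fails ID conversion, DAS-info lookup, or chain-state retrieval instead of aborting the whole computation, so one misbehaving peer no longer sinks the request. A generic, self-contained sketch of that pattern:

package main

import (
	"errors"
	"fmt"
	"log"
)

// collectUsable probes every peer and keeps only the ones that succeed;
// failures are logged and skipped rather than returned, mirroring the change
// to computeIndicesByRootByPeer above.
func collectUsable(peers []string, probe func(string) (uint64, error)) map[string]uint64 {
	usable := make(map[string]uint64, len(peers))
	for _, p := range peers {
		v, err := probe(p)
		if err != nil {
			log.Printf("skipping peer %s: %v", p, err)
			continue
		}
		usable[p] = v
	}
	return usable
}

func main() {
	probe := func(p string) (uint64, error) {
		if p == "bad" {
			return 0, errors.New("no chain state")
		}
		return 128, nil
	}
	fmt.Println(collectUsable([]string{"good", "bad"}, probe)) // prints map[good:128]
}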

@@ -23,7 +23,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -144,7 +143,7 @@ func TestFetchDataColumnSidecars(t *testing.T) {
HeadSlot: 8,
})
p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&pb.MetaDataV2{
p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&ethpb.MetaDataV2{
CustodyGroupCount: 128,
}))


@@ -117,6 +117,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
func TestExtractDataType(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()
type args struct {
@@ -304,6 +305,9 @@ func TestExtractDataType(t *testing.T) {
}
func TestExtractDataTypeFromTypeMapInvalid(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}}
_, err := extractDataTypeFromTypeMap(types.BlockMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)


@@ -2,7 +2,6 @@ package sync
import (
"bytes"
"errors"
"io"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -12,6 +11,7 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
multiplex "github.com/libp2p/go-mplex"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -38,7 +38,7 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
b := make([]byte, 1)
_, err := stream.Read(b)
if err != nil {
return 0, "", err
return 0, "", errors.Wrap(err, "stream read")
}
if b[0] == responseCodeSuccess {
@@ -52,7 +52,7 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
SetStreamReadDeadline(stream, params.BeaconConfig().RespTimeoutDuration())
msg := &types.ErrorMessage{}
if err := encoding.DecodeWithMaxLength(stream, msg); err != nil {
return 0, "", err
return 0, "", errors.Wrap(err, "decode error message")
}
return b[0], string(*msg), nil
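The hunk switches ReadStatusCode to wrapped errors via github.com/pkg/errors, so callers can tell which read or decode step failed. A small sketch of the wrapping pattern (same library, toy reader):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pkg/errors"
)

func readOneByte(r io.Reader) (byte, error) {
	b := make([]byte, 1)
	if _, err := r.Read(b); err != nil {
		// Wrap keeps the original error (retrievable via errors.Cause) while adding context.
		return 0, errors.Wrap(err, "stream read")
	}
	return b[0], nil
}

func main() {
	_, err := readOneByte(bytes.NewReader(nil))
	fmt.Println(err)                         // stream read: EOF
	fmt.Println(errors.Cause(err) == io.EOF) // true
}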

View File

@@ -9,23 +9,28 @@ import (
"github.com/pkg/errors"
)
// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
<-s.initialSyncComplete
// p2pHandlerControlLoop runs in a continuous loop to ensure that:
// - We are subscribed to the correct gossipsub topics (for the current and upcoming epoch).
// - We have registered the correct RPC stream handlers (for the current and upcoming epoch).
// - We have cleaned up gossipsub topics and RPC stream handlers that are no longer needed.
func (s *Service) p2pHandlerControlLoop() {
// At startup, launch registration and peer discovery loops, and register rpc stream handlers.
startEntry := params.GetNetworkScheduleEntry(s.cfg.clock.CurrentEpoch())
s.registerSubscribers(startEntry)
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
// In the event of a node restart, we will still end up subscribing to the correct
// topics during/after the fork epoch. This routine is to ensure correct
// subscriptions for nodes running before a fork epoch.
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if err := s.registerForUpcomingFork(currEpoch); err != nil {
case <-slotTicker.C():
current := s.cfg.clock.CurrentEpoch()
if err := s.ensureRegistrationsForEpoch(current); err != nil {
log.WithError(err).Error("Unable to check for fork in the next epoch")
continue
}
if err := s.deregisterFromPastFork(currEpoch); err != nil {
if err := s.ensureDeregistrationForEpoch(current); err != nil {
log.WithError(err).Error("Unable to check for fork in the previous epoch")
continue
}
@@ -37,102 +42,90 @@ func (s *Service) forkWatcher() {
}
}
// registerForUpcomingFork registers appropriate gossip and RPC topic if there is a fork in the next epoch.
func (s *Service) registerForUpcomingFork(currentEpoch primitives.Epoch) error {
nextEntry := params.GetNetworkScheduleEntry(currentEpoch + 1)
// Check if there is a fork in the next epoch.
if nextEntry.ForkDigest == s.registeredNetworkEntry.ForkDigest {
return nil
}
// ensureRegistrationsForEpoch ensures that gossip topic and RPC stream handler
// registrations are in place for the current and subsequent epoch.
func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
current := params.GetNetworkScheduleEntry(epoch)
s.registerSubscribers(current)
if s.subHandler.digestExists(nextEntry.ForkDigest) {
return nil
}
// Register the subscribers (gossipsub) for the next epoch.
s.registerSubscribers(nextEntry.Epoch, nextEntry.ForkDigest)
// Get the handlers for the current and next fork.
currentHandler, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
currentHandler, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
if !s.digestActionDone(current.ForkDigest, registerRpcOnce) {
for topic, handler := range currentHandler {
s.registerRPC(topic, handler)
}
}
nextHandler, err := s.rpcHandlerByTopicFromEpoch(nextEntry.Epoch)
next := params.GetNetworkScheduleEntry(epoch + 1)
if current.Epoch == next.Epoch {
return nil // no fork in the next epoch
}
s.registerSubscribers(next)
if s.digestActionDone(next.ForkDigest, registerRpcOnce) {
return nil
}
nextHandler, err := s.rpcHandlerByTopicFromFork(next.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}
// Compute newly added topics.
newHandlersByTopic := addedRPCHandlerByTopic(currentHandler, nextHandler)
// Register the new RPC handlers.
// We deregister the old topics later, at least one epoch after the fork.
for topic, handler := range newHandlersByTopic {
s.registerRPC(topic, handler)
}
s.registeredNetworkEntry = nextEntry
return nil
}
// deregisterFromPastFork deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
// Get the fork.
currentFork, err := params.Fork(currentEpoch)
if err != nil {
return errors.Wrap(err, "genesis validators root")
}
// ensureDeregistrationForEpoch deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
func (s *Service) ensureDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
current := params.GetNetworkScheduleEntry(currentEpoch)
// If we are still in our genesis fork version then exit early.
if currentFork.Epoch == params.BeaconConfig().GenesisEpoch {
if current.Epoch == params.BeaconConfig().GenesisEpoch {
return nil
}
if currentEpoch < current.Epoch+1 {
return nil // wait until we are 1 epoch into the fork
}
// Get the epoch after the fork epoch.
afterForkEpoch := currentFork.Epoch + 1
previous := params.GetNetworkScheduleEntry(current.Epoch - 1)
// Remove stream handlers for all topics that are in the set of
// currentTopics-previousTopics
if !s.digestActionDone(previous.ForkDigest, unregisterRpcOnce) {
previousTopics, err := s.rpcHandlerByTopicFromFork(previous.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
currentTopics, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}
topicsToRemove := removedRPCTopics(previousTopics, currentTopics)
for topic := range topicsToRemove {
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullTopic))
log.WithField("topic", fullTopic).Debug("Removed RPC handler")
}
}
// Start de-registering if the current epoch is after the fork epoch.
if currentEpoch != afterForkEpoch {
// Unsubscribe from all gossip topics with the previous fork digest.
if s.digestActionDone(previous.ForkDigest, unregisterGossipOnce) {
return nil
}
// Look at the previous fork's digest.
beforeForkEpoch := currentFork.Epoch - 1
beforeForkDigest := params.ForkDigest(beforeForkEpoch)
// Exit early if there are no topics with that particular digest.
if !s.subHandler.digestExists(beforeForkDigest) {
return nil
}
// Compute the RPC handlers that are no longer needed.
beforeForkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(beforeForkEpoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
forkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(currentFork.Epoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}
topicsToRemove := removedRPCTopics(beforeForkHandlerByTopic, forkHandlerByTopic)
for topic := range topicsToRemove {
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullTopic))
log.WithField("topic", fullTopic).Debug("Removed RPC handler")
}
// Run through all our current active topics and see
// if there are any subscriptions to be removed.
for _, t := range s.subHandler.allTopics() {
retDigest, err := p2p.ExtractGossipDigest(t)
if err != nil {
log.WithError(err).Error("Could not retrieve digest")
continue
}
if retDigest == beforeForkDigest {
if retDigest == previous.ForkDigest {
s.unSubscribeFromTopic(t)
}
}
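A simplified, self-contained sketch of the control-loop shape described above, using toy stand-ins for the network schedule types (not the Prysm API):

package main

import "fmt"

// scheduleEntry is a toy stand-in for params.NetworkScheduleEntry (assumed shape).
type scheduleEntry struct {
	Epoch  uint64
	Digest [4]byte
}

// entryForEpoch is a hypothetical schedule: one fork activates at epoch 10.
func entryForEpoch(epoch uint64) scheduleEntry {
	if epoch >= 10 {
		return scheduleEntry{Epoch: 10, Digest: [4]byte{0xaa}}
	}
	return scheduleEntry{Epoch: 0, Digest: [4]byte{0x01}}
}

// onEpochTick mirrors the shape of the control loop body: ensure registrations for
// the current entry, pre-register the next entry when a fork is coming, and only
// deregister the previous entry once we are at least one epoch past the fork.
// The real code additionally guards each step with a once-per-digest record.
func onEpochTick(epoch uint64) {
	current := entryForEpoch(epoch)
	next := entryForEpoch(epoch + 1)

	fmt.Printf("epoch %d: ensure registrations for digest %x\n", epoch, current.Digest)
	if next.Epoch != current.Epoch {
		fmt.Printf("epoch %d: pre-register upcoming digest %x\n", epoch, next.Digest)
	}
	if current.Epoch > 0 && epoch >= current.Epoch+1 {
		previous := entryForEpoch(current.Epoch - 1)
		fmt.Printf("epoch %d: deregister previous digest %x\n", epoch, previous.Digest)
	}
}

func main() {
	for epoch := uint64(8); epoch <= 12; epoch++ {
		onEpochTick(epoch)
	}
}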

View File

@@ -50,12 +50,36 @@ func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
return r
}
func TestRegisterSubscriptions_Idempotent(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
fulu := params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().FuluForkEpoch = fulu
params.BeaconConfig().InitializeForkSchedule()
current := fulu - 1
s := testForkWatcherService(t, current)
next := params.GetNetworkScheduleEntry(fulu)
wg := attachSpawner(s)
require.Equal(t, true, s.registerSubscribers(next))
done := make(chan struct{})
go func() { wg.Wait(); close(done) }()
select {
case <-time.After(5 * time.Second):
t.Fatal("timed out waiting for subscriptions to be registered")
case <-done:
}
// the goal of this callback is just to assert that spawn is never called.
s.subscriptionSpawner = func(func()) { t.Error("registration routines spawned twice for the same digest") }
require.NoError(t, s.ensureRegistrationsForEpoch(fulu))
}
func TestService_CheckForNextEpochFork(t *testing.T) {
closedChan := make(chan struct{})
close(closedChan)
params.SetupTestConfigCleanup(t)
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 1096*2
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()
tests := []struct {
@@ -171,7 +195,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
current := tt.epochAtRegistration(tt.forkEpoch)
s := testForkWatcherService(t, current)
wg := attachSpawner(s)
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
wg.Wait()
tt.checkRegistration(t, s)
@@ -193,10 +217,13 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
// Move the clock to just before the next fork epoch and ensure deregistration is correct
wg = attachSpawner(s)
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
wg.Wait()
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch))
assert.Equal(t, true, s.subHandler.digestExists(digest))
// deregister as if it is the epoch after the next fork epoch
require.NoError(t, s.deregisterFromPastFork(tt.nextForkEpoch+1))
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch+1))
assert.Equal(t, false, s.subHandler.digestExists(digest))
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
})

View File

@@ -384,7 +384,7 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
}
// Compute the columns to request.
custodyGroupCount, err := f.p2p.CustodyGroupCount()
custodyGroupCount, err := f.p2p.CustodyGroupCount(ctx)
if err != nil {
return blobsPid, errors.Wrap(err, "custody group count")
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
mathutil "github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -131,8 +130,8 @@ func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
// Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
// Limit cannot be less that minimum peers required by sync mechanism.
limit = mathutil.Max(limit, uint64(required))
limit = max(limit, uint64(required))
// Limit cannot be higher than number of peers available (safe-guard).
limit = mathutil.Min(limit, uint64(len(peers)))
limit = min(limit, uint64(len(peers)))
return peers[:limit]
}
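The refactor drops the mathutil helpers in favor of Go 1.21's built-in max and min; the same clamp-to-bounds idea in isolation:

package main

import "fmt"

// clamp keeps v within [lo, hi] using the Go 1.21+ built-in min and max.
func clamp(v, lo, hi uint64) uint64 {
	return min(max(v, lo), hi)
}

func main() {
	fmt.Println(clamp(3, 5, 10))  // 5  (raised to the required minimum)
	fmt.Println(clamp(12, 5, 10)) // 10 (capped at the number of available peers)
	fmt.Println(clamp(7, 5, 10))  // 7
}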

View File

@@ -1017,13 +1017,13 @@ func TestBlobRangeForBlocks(t *testing.T) {
for i := range blks {
sbbs[i] = blks[i]
}
retentionStart := primitives.Slot(5)
retentionStart := blks[len(blks)/2].Block().Slot()
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
bounds := countCommitments(bwb, retentionStart).blobRange(nil)
require.Equal(t, retentionStart, bounds.low)
higher := primitives.Slot(len(blks) + 1)
bounds = countCommitments(bwb, higher).blobRange(nil)
maxBlkSlot := blks[len(blks)-1].Block().Slot()
bounds = countCommitments(bwb, maxBlkSlot+1).blobRange(nil)
var nilBounds *blobRange
require.Equal(t, nilBounds, bounds)
@@ -1054,17 +1054,17 @@ func TestBlobRequest(t *testing.T) {
}
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
maxBlkSlot := primitives.Slot(len(blks) - 1)
tooHigh := primitives.Slot(len(blks) + 1)
maxBlkSlot := blks[len(blks)-1].Block().Slot()
tooHigh := maxBlkSlot + 1
req = countCommitments(bwb, tooHigh).blobRange(nil).Request()
require.Equal(t, nilReq, req)
req = countCommitments(bwb, maxBlkSlot).blobRange(nil).Request()
require.Equal(t, uint64(1), req.Count)
require.Equal(t, maxBlkSlot, req.StartSlot)
require.Equal(t, uint64(1), req.Count)
halfway := primitives.Slot(5)
halfway := blks[len(blks)/2].Block().Slot()
req = countCommitments(bwb, halfway).blobRange(nil).Request()
require.Equal(t, halfway, req.StartSlot)
// adding 1 to include the halfway slot itself
@@ -1103,6 +1103,12 @@ func TestCountCommitments(t *testing.T) {
}
func TestCommitmentCountList(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
ds := util.SlotAtEpoch(t, de)
denebRel := func(s primitives.Slot) primitives.Slot {
return ds + s
}
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
cc commitmentCountList
@@ -1119,20 +1125,20 @@ func TestCommitmentCountList(t *testing.T) {
{
name: "nil bss, single slot",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: denebRel(11235), count: 1},
},
expected: &blobRange{low: 11235, high: 11235},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 1},
expected: &blobRange{low: denebRel(11235), high: denebRel(11235)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 1},
},
{
name: "nil bss, sparse slots",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: 11240, count: params.BeaconConfig().MaxBlobsPerBlock(0)},
{slot: 11250, count: 3},
{slot: denebRel(11235), count: 1},
{slot: denebRel(11240), count: maxBlobs},
{slot: denebRel(11250), count: 3},
},
expected: &blobRange{low: 11235, high: 11250},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 16},
expected: &blobRange{low: denebRel(11235), high: denebRel(11250)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 16},
},
{
name: "AllAvailable in middle, some avail low, none high",
@@ -1141,15 +1147,15 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("1"))},
{slot: 15, count: 3},
{slot: denebRel(0), count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: maxBlobs, root: bytesutil.ToBytes32([]byte("1"))},
{slot: denebRel(15), count: 3},
},
expected: &blobRange{low: 0, high: 15},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 0, Count: 16},
expected: &blobRange{low: denebRel(0), high: denebRel(15)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(0), Count: 16},
},
{
name: "AllAvailable at high and low",
@@ -1158,15 +1164,15 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 5},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 1},
expected: &blobRange{low: denebRel(5), high: denebRel(5)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 1},
},
{
name: "AllAvailable at high and low, adjacent range in middle",
@@ -1175,16 +1181,16 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 6, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3},
{slot: denebRel(6), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 6},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 2},
expected: &blobRange{low: denebRel(5), high: denebRel(6)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 2},
},
{
name: "AllAvailable at high and low, range in middle",
@@ -1194,16 +1200,16 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("1")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3, root: bytesutil.ToBytes32([]byte("1"))},
{slot: 10, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3, root: bytesutil.ToBytes32([]byte("1"))},
{slot: denebRel(10), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 10},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 6},
expected: &blobRange{low: denebRel(5), high: denebRel(10)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 6},
},
}
for _, c := range cases {
@@ -1218,8 +1224,8 @@ func TestCommitmentCountList(t *testing.T) {
require.IsNil(t, br.Request())
} else {
req := br.Request()
require.DeepEqual(t, req.StartSlot, c.request.StartSlot)
require.DeepEqual(t, req.Count, c.request.Count)
require.Equal(t, req.StartSlot, c.request.StartSlot)
require.Equal(t, req.Count, c.request.Count)
}
})
}
@@ -1299,7 +1305,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
r1: {0, 1},
r7: {0, 1, 2, 3, 4, 5},
}
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
bss := filesystem.NewMockBlobStorageSummarizer(t, params.BeaconConfig().DenebForkEpoch, onDisk)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
require.NoError(t, err)
require.Equal(t, 6, len(bwb[i1].Blobs))

View File

@@ -413,7 +413,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay ti
}
// Compute the indices we need to custody.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}

View File

@@ -439,6 +439,7 @@ func TestService_Synced(t *testing.T) {
}
func TestMissingBlobRequest(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
cases := []struct {
name string
setup func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage)
@@ -476,7 +477,7 @@ func TestMissingBlobRequest(t *testing.T) {
{
name: "2 commitments, 1 missing",
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 1))
return bk, fs
@@ -486,7 +487,7 @@ func TestMissingBlobRequest(t *testing.T) {
{
name: "2 commitments, 0 missing",
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 0, 1))
return bk, fs
@@ -629,7 +630,7 @@ func TestFetchOriginSidecars(t *testing.T) {
// Compute the columns to request.
p2p := p2ptest.NewTestP2P(t)
custodyGroupCount, err := p2p.CustodyGroupCount()
custodyGroupCount, err := p2p.CustodyGroupCount(t.Context())
require.NoError(t, err)
samplingSize := max(custodyGroupCount, samplesPerSlot)

beacon-chain/sync/once.go (new file)
View File

@@ -0,0 +1,40 @@
package sync
import "sync"
// oncePerDigest represents an action that should only be performed once per fork digest.
type oncePerDigest uint8
const (
doneZero oncePerDigest = 0
registerGossipOnce oncePerDigest = 1 << 0
unregisterGossipOnce oncePerDigest = 1 << 1
registerRpcOnce oncePerDigest = 1 << 2
unregisterRpcOnce oncePerDigest = 1 << 3
)
// perDigestSet keeps track of which oncePerDigest actions
// have been performed for each fork digest.
type perDigestSet struct {
sync.Mutex
history map[[4]byte]oncePerDigest
}
// digestActionDone marks the action as done for the given digest, returning true if it was already done.
func (s *Service) digestActionDone(digest [4]byte, action oncePerDigest) bool {
s.digestActions.Lock()
defer s.digestActions.Unlock()
// Lazily initialize the history map; the embedded mutex is not a reference type, so its zero value is ready to use.
if s.digestActions.history == nil {
s.digestActions.history = make(map[[4]byte]oncePerDigest)
}
prev := s.digestActions.history[digest]
// Return true if the bit was already set
if prev&action != 0 {
return true
}
s.digestActions.history[digest] = prev | action
return false
}
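Because each action is a distinct bit, a digest can record several independent one-time actions. A standalone sketch of the caller pattern (a re-implementation for illustration, not the Prysm type):

package main

import (
	"fmt"
	"sync"
)

type onceAction uint8

const (
	registerGossip onceAction = 1 << iota
	unregisterGossip
	registerRPC
	unregisterRPC
)

// onceSet mirrors the once-per-digest bookkeeping: one bitmask per fork digest.
type onceSet struct {
	mu      sync.Mutex
	history map[[4]byte]onceAction
}

// done marks the action for the digest and reports whether it had already run.
func (o *onceSet) done(digest [4]byte, action onceAction) bool {
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.history == nil {
		o.history = make(map[[4]byte]onceAction)
	}
	prev := o.history[digest]
	if prev&action != 0 {
		return true
	}
	o.history[digest] = prev | action
	return false
}

func main() {
	var s onceSet
	d := [4]byte{1, 2, 3, 4}
	fmt.Println(s.done(d, registerGossip)) // false: first time, caller performs the registration
	fmt.Println(s.done(d, registerGossip)) // true: already done, caller skips
	fmt.Println(s.done(d, registerRPC))    // false: independent bit, still runs once
}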

View File

@@ -0,0 +1,40 @@
package sync
import (
"fmt"
"slices"
"testing"
)
func TestDigestActionDone(t *testing.T) {
digests := [][4]byte{
{0, 0, 0, 0},
{1, 2, 3, 4},
{4, 3, 2, 1},
}
actions := []oncePerDigest{
registerGossipOnce,
unregisterGossipOnce,
registerRpcOnce,
unregisterRpcOnce,
}
testCombos := func(d [][4]byte, a []oncePerDigest) {
s := &Service{}
for _, digest := range d {
for _, action := range a {
t.Run(fmt.Sprintf("digest=%#x/action=%d", digest, action), func(t *testing.T) {
if s.digestActionDone(digest, action) {
t.Fatal("expected first call to return false")
}
if !s.digestActionDone(digest, action) {
t.Fatal("expected second call to return true")
}
})
}
}
}
testCombos(digests, actions)
slices.Reverse(digests)
slices.Reverse(actions)
testCombos(digests, actions)
}

View File

@@ -4,13 +4,14 @@ import (
"bytes"
"context"
"encoding/hex"
"sync"
"fmt"
"github.com/OffchainLabs/prysm/v6/async"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -19,91 +20,283 @@ import (
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/sirupsen/logrus"
)
// This defines how often a node cleans up and processes pending attestations in the queue.
var processPendingAttsPeriod = slots.DivideSlotBy(2 /* twice per slot */)
var pendingAttsLimit = 10000
var pendingAttsLimit = 32768
// This processes pending attestation queues on every processPendingAttsPeriod.
func (s *Service) runPendingAttsQueue() {
// Prevents multiple queue processing goroutines (invoked by RunEvery) from contending for data.
mutex := new(sync.Mutex)
async.RunEvery(s.ctx, processPendingAttsPeriod, func() {
mutex.Lock()
if err := s.processPendingAtts(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending attestation")
}
mutex.Unlock()
})
}
// This defines how pending attestations are processed. It contains features:
// 1. Clean up invalid pending attestations from the queue.
// 2. Check if pending attestations can be processed when the block has arrived.
// 3. Request block from a random peer if unable to proceed step 2.
func (s *Service) processPendingAtts(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "processPendingAtts")
// This method processes pending attestations once a "known" block has arrived. After validation,
// valid attestations get saved into the operation mem pool, and invalid attestations get deleted
// from the sync pending pool.
func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "processPendingAttsForBlock")
defer span.End()
// Confirm that the pending attestation's missing block arrived and the node processed the block.
if !s.cfg.beaconDB.HasBlock(ctx, bRoot) || !(s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) || !s.cfg.chain.InForkchoice(bRoot) {
return fmt.Errorf("could not process unknown block root %#x", bRoot)
}
// Before a node processes pending attestations queue, it verifies
// the attestations in the queue are still valid. Attestations will
// be deleted from the queue if invalid (i.e. getting stalled from falling too many slots behind).
s.validatePendingAtts(ctx, s.cfg.clock.CurrentSlot())
s.pendingAttsLock.RLock()
roots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
for br := range s.blkRootToPendingAtts {
roots = append(roots, br)
}
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()
var pendingRoots [][32]byte
randGen := rand.NewGenerator()
for _, bRoot := range roots {
s.pendingAttsLock.RLock()
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()
// has the pending attestation's missing block arrived and the node processed block yet?
if s.cfg.beaconDB.HasBlock(ctx, bRoot) && (s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) && s.cfg.chain.InForkchoice(bRoot) {
s.processAttestations(ctx, attestations)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
}).Debug("Verified and saved pending attestations to pool")
if len(attestations) > 0 {
start := time.Now()
s.processAttestations(ctx, attestations)
duration := time.Since(start)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
"duration": duration,
}).Debug("Verified and saved pending attestations to pool")
}
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
s.pendingAttsLock.Lock()
delete(s.blkRootToPendingAtts, bRoot)
s.pendingAttsLock.Unlock()
} else {
s.pendingQueueLock.RLock()
seen := s.seenPendingBlocks[bRoot]
s.pendingQueueLock.RUnlock()
if !seen {
pendingRoots = append(pendingRoots, bRoot)
}
randGen := rand.NewGenerator()
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
s.pendingAttsLock.Lock()
delete(s.blkRootToPendingAtts, bRoot)
pendingRoots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
s.pendingQueueLock.RLock()
for r := range s.blkRootToPendingAtts {
if !s.seenPendingBlocks[r] {
pendingRoots = append(pendingRoots, r)
}
}
s.pendingQueueLock.RUnlock()
s.pendingAttsLock.Unlock()
// Request the blocks for the pending attestations that could not be processed.
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
for _, signedAtt := range attestations {
// The pending attestations can arrive as both aggregates and attestations,
// and each form has to be processed differently.
switch t := signedAtt.(type) {
if len(attestations) == 0 {
return
}
atts := make([]ethpb.Att, 0, len(attestations))
for _, att := range attestations {
switch v := att.(type) {
case ethpb.Att:
s.processAtt(ctx, t)
atts = append(atts, v)
case ethpb.SignedAggregateAttAndProof:
s.processAggregate(ctx, t)
s.processAggregate(ctx, v)
default:
log.Warnf("Unexpected item of type %T in pending attestation queue. Item will not be processed", t)
log.Warnf("Unexpected attestation type %T, skipping", v)
}
}
for _, bucket := range bucketAttestationsByData(atts) {
s.processAttestationBucket(ctx, bucket)
}
}
// attestationBucket groups attestations with the same AttestationData for batch processing.
type attestationBucket struct {
dataHash [32]byte
data *ethpb.AttestationData
attestations []ethpb.Att
}
// processAttestationBucket processes a bucket of attestations with shared AttestationData.
func (s *Service) processAttestationBucket(ctx context.Context, bucket *attestationBucket) {
if bucket == nil || len(bucket.attestations) == 0 {
return
}
data := bucket.data
// Shared validations for the entire bucket.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
log.WithError(blockchain.ErrNotDescendantOfFinalized).WithField("root", fmt.Sprintf("%#x", data.BeaconBlockRoot)).Debug("Failed forkchoice check for bucket")
return
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
log.WithError(err).Debug("Failed to get attestation prestate for bucket")
return
}
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, bucket.attestations[0]); err != nil {
log.WithError(err).Debug("Failed FFG consistency check for bucket")
return
}
// Collect valid attestations for both single and electra formats.
// Broadcast takes single format but attestation pool and batch signature verification take electra format.
forBroadcast := make([]ethpb.Att, 0, len(bucket.attestations))
forPool := make([]ethpb.Att, 0, len(bucket.attestations))
for _, att := range bucket.attestations {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, att.GetCommitteeIndex())
if err != nil {
log.WithError(err).Debug("Failed to get committee from state")
continue
}
valid, err := validateAttesterData(ctx, att, committee)
if err != nil {
log.WithError(err).Debug("Failed attester data validation")
continue
}
if valid != pubsub.ValidationAccept {
log.Debug("Pending attestation rejected due to invalid data")
continue
}
var conv ethpb.Att
if att.Version() >= version.Electra {
single, ok := att.(*ethpb.SingleAttestation)
if !ok {
log.Debugf("Wrong type: expected %T, got %T", &ethpb.SingleAttestation{}, att)
continue
}
conv = single.ToAttestationElectra(committee)
} else {
conv = att
}
forBroadcast = append(forBroadcast, att)
forPool = append(forPool, conv)
}
if len(forPool) == 0 {
return
}
verified := s.batchVerifyAttestationSignatures(ctx, forPool, preState)
verifiedSet := make(map[ethpb.Att]struct{}, len(verified))
for _, att := range verified {
verifiedSet[att] = struct{}{}
}
for i, poolAtt := range forPool {
if _, ok := verifiedSet[poolAtt]; ok {
s.processVerifiedAttestation(ctx, forBroadcast[i], poolAtt, preState)
}
}
}
// batchVerifyAttestationSignatures attempts batch verification, with individual fallback on failure.
func (s *Service) batchVerifyAttestationSignatures(
ctx context.Context,
attestations []ethpb.Att,
preState state.ReadOnlyBeaconState,
) []ethpb.Att {
const fallbackMsg = "batch verification failed, using individual checks"
set, err := blocks.AttestationSignatureBatch(ctx, preState, attestations)
if err != nil {
log.WithError(err).Debug(fallbackMsg)
return s.fallbackToIndividualVerification(ctx, attestations, preState)
}
ok, err := set.Verify()
if err != nil || !ok {
if err != nil {
log.WithError(err).Debug(fallbackMsg)
} else {
log.Debug(fallbackMsg)
}
return s.fallbackToIndividualVerification(ctx, attestations, preState)
}
return attestations
}
// fallbackToIndividualVerification verifies each attestation individually if batch verification fails.
func (s *Service) fallbackToIndividualVerification(
ctx context.Context,
attestations []ethpb.Att,
preState state.ReadOnlyBeaconState,
) []ethpb.Att {
verified := make([]ethpb.Att, 0, len(attestations))
for _, att := range attestations {
res, err := s.validateUnaggregatedAttWithState(ctx, att, preState)
if err != nil {
log.WithError(err).Debug("Individual signature verification error")
continue
}
if res == pubsub.ValidationAccept {
verified = append(verified, att)
}
}
return verified
}
// saveAttestation saves an attestation to the appropriate pool.
func (s *Service) saveAttestation(att ethpb.Att) error {
if features.Get().EnableExperimentalAttestationPool {
return s.cfg.attestationCache.Add(att)
}
return s.cfg.attPool.SaveUnaggregatedAttestation(att)
}
// processVerifiedAttestation handles a signature-verified attestation.
func (s *Service) processVerifiedAttestation(
ctx context.Context,
broadcastAtt ethpb.Att,
poolAtt ethpb.Att,
preState state.ReadOnlyBeaconState,
) {
data := broadcastAtt.GetData()
if err := s.saveAttestation(poolAtt); err != nil {
log.WithError(err).Debug("Failed to save unaggregated attestation")
return
}
if key, err := generateUnaggregatedAttCacheKey(broadcastAtt); err != nil {
log.WithError(err).Error("Failed to generate cache key for attestation tracking")
} else {
s.setSeenUnaggregatedAtt(key)
}
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
if err != nil {
log.WithError(err).Debug("Failed to retrieve active validator count")
return
}
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, broadcastAtt), broadcastAtt); err != nil {
log.WithError(err).Debug("Failed to broadcast attestation")
}
var (
eventType feed.EventType
eventData any
)
switch {
case broadcastAtt.Version() >= version.Electra:
if sa, ok := broadcastAtt.(*ethpb.SingleAttestation); ok {
eventType = operation.SingleAttReceived
eventData = &operation.SingleAttReceivedData{Attestation: sa}
break
}
fallthrough
default:
eventType = operation.UnaggregatedAttReceived
eventData = &operation.UnAggregatedAttReceivedData{Attestation: broadcastAtt}
}
// Send event notification
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: eventType,
Data: eventData,
})
}
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
@@ -114,9 +307,10 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
valRes, err := s.validateAggregatedAtt(ctx, aggregate)
if err != nil {
log.WithError(err).Debug("Pending aggregated attestation failed validation")
return
}
aggValid := pubsub.ValidationAccept == valRes
if s.validateBlockInAttestation(ctx, aggregate) && aggValid {
if aggValid && s.validateBlockInAttestation(ctx, aggregate) {
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.attestationCache.Add(att); err != nil {
log.WithError(err).Debug("Could not save aggregated attestation")
@@ -143,114 +337,6 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
}
}
func (s *Service) processAtt(ctx context.Context, att ethpb.Att) {
data := att.GetData()
// This is an important validation before retrieving attestation pre state to defend against
// attestation's target intentionally referencing a checkpoint that's long ago.
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
return
}
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att); err != nil {
log.WithError(err).Debug("Could not verify FFG consistency")
return
}
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
log.WithError(err).Debug("Could not retrieve attestation prestate")
return
}
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, att.GetCommitteeIndex())
if err != nil {
log.WithError(err).Debug("Could not retrieve committee from state")
return
}
valid, err := validateAttesterData(ctx, att, committee)
if err != nil {
log.WithError(err).Debug("Could not validate attester data")
return
} else if valid != pubsub.ValidationAccept {
log.Debug("Attestation failed attester data validation")
return
}
// Decide if the attestation is an Electra SingleAttestation or a Phase0 unaggregated attestation
var (
attForValidation ethpb.Att
broadcastAtt ethpb.Att
eventType feed.EventType
eventData interface{}
)
if att.Version() >= version.Electra {
singleAtt, ok := att.(*ethpb.SingleAttestation)
if !ok {
log.Debugf("Attestation has wrong type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
return
}
// Convert Electra SingleAttestation to unaggregated ElectraAttestation. This is needed because many parts of the codebase assume that attestations have a certain structure and SingleAttestation validates these assumptions.
attForValidation = singleAtt.ToAttestationElectra(committee)
broadcastAtt = singleAtt
eventType = operation.SingleAttReceived
eventData = &operation.SingleAttReceivedData{
Attestation: singleAtt,
}
} else {
// Phase0 attestation
attForValidation = att
broadcastAtt = att
eventType = operation.UnaggregatedAttReceived
eventData = &operation.UnAggregatedAttReceivedData{
Attestation: att,
}
}
valid, err = s.validateUnaggregatedAttWithState(ctx, attForValidation, preState)
if err != nil {
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
return
}
if valid == pubsub.ValidationAccept {
if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.attestationCache.Add(attForValidation); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
return
}
} else {
if err := s.cfg.attPool.SaveUnaggregatedAttestation(attForValidation); err != nil {
log.WithError(err).Debug("Could not save unaggregated attestation")
return
}
}
attKey, err := generateUnaggregatedAttCacheKey(att)
if err != nil {
log.WithError(err).Error("Could not generate cache key for attestation tracking")
} else {
s.setSeenUnaggregatedAtt(attKey)
}
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
if err != nil {
log.WithError(err).Debug("Could not retrieve active validator count")
return
}
// Broadcast the final 'broadcastAtt' object
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, broadcastAtt), broadcastAtt); err != nil {
log.WithError(err).Debug("Could not broadcast")
}
// Feed event notification for other services
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
Type: eventType,
Data: eventData,
})
}
}
// This defines how pending aggregates are saved in the map. The key is the
// root of the missing block. The value is the list of pending attestations/aggregates
// that voted for that block root. The caller of this function is responsible
@@ -392,3 +478,29 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)
}
}
}
// bucketAttestationsByData groups attestations by their AttestationData hash.
func bucketAttestationsByData(attestations []ethpb.Att) map[[32]byte]*attestationBucket {
bucketMap := make(map[[32]byte]*attestationBucket)
for _, att := range attestations {
data := att.GetData()
dataHash, err := data.HashTreeRoot()
if err != nil {
log.WithError(err).Debug("Failed to hash attestation data, skipping attestation")
continue
}
if bucket, ok := bucketMap[dataHash]; ok {
bucket.attestations = append(bucket.attestations, att)
} else {
bucketMap[dataHash] = &attestationBucket{
dataHash: dataHash,
data: data,
attestations: []ethpb.Att{att},
}
}
}
return bucketMap
}
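Taken together, the new path buckets pending attestations by their AttestationData root, verifies each bucket's signatures in one batch, and falls back to per-attestation checks when the batch fails. A simplified standalone sketch of that flow with toy types and verifiers (not the real BLS batch API):

package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
)

// att is a toy attestation: shared data plus a per-validator signature.
type att struct {
	data string
	sig  string
}

// groupByData buckets attestations that share identical data, keyed by its hash.
func groupByData(atts []att) map[[32]byte][]att {
	buckets := make(map[[32]byte][]att)
	for _, a := range atts {
		key := sha256.Sum256([]byte(a.data))
		buckets[key] = append(buckets[key], a)
	}
	return buckets
}

// verifyBatch pretends to verify all signatures at once; like an aggregate check,
// it fails if any single signature is bad.
func verifyBatch(atts []att) error {
	for _, a := range atts {
		if a.sig == "bad" {
			return errors.New("batch verification failed")
		}
	}
	return nil
}

// verifyIndividually is the fallback: keep only attestations that verify on their own.
func verifyIndividually(atts []att) []att {
	var ok []att
	for _, a := range atts {
		if a.sig != "bad" {
			ok = append(ok, a)
		}
	}
	return ok
}

func main() {
	atts := []att{
		{data: "slot 5 / target A", sig: "good"},
		{data: "slot 5 / target A", sig: "bad"},
		{data: "slot 6 / target B", sig: "good"},
	}
	for _, bucket := range groupByData(atts) {
		verified := bucket
		if err := verifyBatch(bucket); err != nil {
			// Batch failed: fall back to individual checks so one bad signature
			// does not discard the whole bucket.
			verified = verifyIndividually(bucket)
		}
		fmt.Printf("bucket %q: %d verified of %d\n", bucket[0].data, len(verified), len(bucket))
	}
}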

View File

@@ -0,0 +1,417 @@
package sync
import (
"context"
"testing"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestProcessAttestationBucket(t *testing.T) {
t.Run("EmptyBucket", func(t *testing.T) {
hook := logTest.NewGlobal()
s := &Service{}
s.processAttestationBucket(context.Background(), nil)
emptyBucket := &attestationBucket{
attestations: []ethpb.Att{},
}
s.processAttestationBucket(context.Background(), emptyBucket)
require.Equal(t, 0, len(hook.Entries), "Should not log any messages for empty buckets")
})
t.Run("ForkchoiceFailure", func(t *testing.T) {
hook := logTest.NewGlobal()
chainService := &mockChain.ChainService{
NotFinalized: true, // This makes InForkchoice return false
}
s := &Service{
cfg: &config{
chain: chainService,
},
}
attData := &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
}
bucket := &attestationBucket{
data: attData,
attestations: []ethpb.Att{util.NewAttestation()},
}
s.processAttestationBucket(context.Background(), bucket)
require.Equal(t, 1, len(hook.Entries))
assert.StringContains(t, "Failed forkchoice check for bucket", hook.LastEntry().Message)
require.NotNil(t, hook.LastEntry().Data["error"])
})
t.Run("CommitteeFailure", func(t *testing.T) {
hook := logTest.NewGlobal()
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(1))
chainService := &mockChain.ChainService{
State: beaconState,
ValidAttestation: true,
}
s := &Service{
cfg: &config{
chain: chainService,
},
}
attData := &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("blockroot"), 32),
},
CommitteeIndex: 999999,
}
att := util.NewAttestation()
att.Data = attData
bucket := &attestationBucket{
data: attData,
attestations: []ethpb.Att{att},
}
s.processAttestationBucket(context.Background(), bucket)
require.Equal(t, 1, len(hook.Entries))
assert.StringContains(t, "Failed to get committee from state", hook.LastEntry().Message)
})
t.Run("FFGConsistencyFailure", func(t *testing.T) {
hook := logTest.NewGlobal()
validators := make([]*ethpb.Validator, 64)
for i := range validators {
validators[i] = &ethpb.Validator{
ExitEpoch: 1000000,
EffectiveBalance: 32000000000,
}
}
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(1))
require.NoError(t, beaconState.SetValidators(validators))
chainService := &mockChain.ChainService{
State: beaconState,
}
s := &Service{
cfg: &config{
chain: chainService,
},
}
attData := &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("different_target"), 32), // Different from BeaconBlockRoot to trigger FFG failure
},
}
att := util.NewAttestation()
att.Data = attData
bucket := &attestationBucket{
data: attData,
attestations: []ethpb.Att{att},
}
s.processAttestationBucket(context.Background(), bucket)
require.Equal(t, 1, len(hook.Entries))
assert.StringContains(t, "Failed FFG consistency check for bucket", hook.LastEntry().Message)
})
t.Run("ProcessingSuccess", func(t *testing.T) {
hook := logTest.NewGlobal()
validators := make([]*ethpb.Validator, 64)
for i := range validators {
validators[i] = &ethpb.Validator{
ExitEpoch: 1000000,
EffectiveBalance: 32000000000,
}
}
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(1))
require.NoError(t, beaconState.SetValidators(validators))
chainService := &mockChain.ChainService{
State: beaconState,
ValidAttestation: true,
}
s := &Service{
cfg: &config{
chain: chainService,
},
}
// Test with Phase0 attestation
t.Run("Phase0_NoError", func(t *testing.T) {
hook.Reset() // Reset logs before test
phase0Att := util.NewAttestation()
phase0Att.Data.Slot = 1
phase0Att.Data.CommitteeIndex = 0
bucket := &attestationBucket{
data: phase0Att.GetData(),
attestations: []ethpb.Att{phase0Att},
}
s.processAttestationBucket(context.Background(), bucket)
})
// Test with SingleAttestation
t.Run("Electra_NoError", func(t *testing.T) {
hook.Reset() // Reset logs before test
attData := &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: bytesutil.PadTo([]byte("source"), 32),
},
Target: &ethpb.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("blockroot"), 32), // Same as BeaconBlockRoot for LMD/FFG consistency
},
}
singleAtt := &ethpb.SingleAttestation{
CommitteeId: 0,
AttesterIndex: 0,
Data: attData,
Signature: make([]byte, 96),
}
bucket := &attestationBucket{
data: singleAtt.GetData(),
attestations: []ethpb.Att{singleAtt},
}
s.processAttestationBucket(context.Background(), bucket)
})
})
}
func TestBucketAttestationsByData(t *testing.T) {
t.Run("EmptyInput", func(t *testing.T) {
hook := logTest.NewGlobal()
buckets := bucketAttestationsByData(nil)
require.Equal(t, 0, len(buckets))
require.Equal(t, 0, len(hook.Entries))
buckets = bucketAttestationsByData([]ethpb.Att{})
require.Equal(t, 0, len(buckets))
require.Equal(t, 0, len(hook.Entries))
})
t.Run("SingleAttestation", func(t *testing.T) {
hook := logTest.NewGlobal()
att := util.NewAttestation()
att.Data.Slot = 1
att.Data.CommitteeIndex = 0
buckets := bucketAttestationsByData([]ethpb.Att{att})
require.Equal(t, 1, len(buckets))
var bucket *attestationBucket
for _, b := range buckets {
bucket = b
break
}
require.NotNil(t, bucket)
require.Equal(t, 1, len(bucket.attestations))
require.Equal(t, att, bucket.attestations[0])
require.Equal(t, att.GetData(), bucket.data)
require.Equal(t, 0, len(hook.Entries))
})
t.Run("MultipleAttestationsSameData", func(t *testing.T) {
hook := logTest.NewGlobal()
att1 := util.NewAttestation()
att1.Data.Slot = 1
att1.Data.CommitteeIndex = 0
att2 := util.NewAttestation()
att2.Data = att1.Data // Same data
att2.Signature = make([]byte, 96) // Different signature
buckets := bucketAttestationsByData([]ethpb.Att{att1, att2})
require.Equal(t, 1, len(buckets), "Should have one bucket for same data")
var bucket *attestationBucket
for _, b := range buckets {
bucket = b
break
}
require.NotNil(t, bucket)
require.Equal(t, 2, len(bucket.attestations), "Should have both attestations in one bucket")
require.Equal(t, att1.GetData(), bucket.data)
require.Equal(t, 0, len(hook.Entries))
})
t.Run("MultipleAttestationsDifferentData", func(t *testing.T) {
hook := logTest.NewGlobal()
att1 := util.NewAttestation()
att1.Data.Slot = 1
att1.Data.CommitteeIndex = 0
att2 := util.NewAttestation()
att2.Data.Slot = 2 // Different slot
att2.Data.CommitteeIndex = 1
buckets := bucketAttestationsByData([]ethpb.Att{att1, att2})
require.Equal(t, 2, len(buckets), "Should have two buckets for different data")
bucketCount := 0
for _, bucket := range buckets {
require.Equal(t, 1, len(bucket.attestations), "Each bucket should have one attestation")
bucketCount++
}
require.Equal(t, 2, bucketCount, "Should have exactly two buckets")
require.Equal(t, 0, len(hook.Entries))
})
t.Run("MixedAttestationTypes", func(t *testing.T) {
hook := logTest.NewGlobal()
// Create Phase0 attestation
phase0Att := util.NewAttestation()
phase0Att.Data.Slot = 1
phase0Att.Data.CommitteeIndex = 0
electraAtt := &ethpb.SingleAttestation{
CommitteeId: 0,
AttesterIndex: 1,
Data: phase0Att.Data, // Same data
Signature: make([]byte, 96),
}
buckets := bucketAttestationsByData([]ethpb.Att{phase0Att, electraAtt})
require.Equal(t, 1, len(buckets), "Should have one bucket for same data")
var bucket *attestationBucket
for _, b := range buckets {
bucket = b
break
}
require.NotNil(t, bucket)
require.Equal(t, 2, len(bucket.attestations), "Should have both attestations in one bucket")
require.Equal(t, phase0Att.GetData(), bucket.data)
require.Equal(t, 0, len(hook.Entries))
})
}
func TestBatchVerifyAttestationSignatures(t *testing.T) {
t.Run("EmptyInput", func(t *testing.T) {
s := &Service{}
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
result := s.batchVerifyAttestationSignatures(context.Background(), []ethpb.Att{}, beaconState)
// Empty input should return empty output
require.Equal(t, 0, len(result))
})
t.Run("BatchVerificationWithState", func(t *testing.T) {
hook := logTest.NewGlobal()
validators := make([]*ethpb.Validator, 64)
for i := range validators {
validators[i] = &ethpb.Validator{
ExitEpoch: 1000000,
EffectiveBalance: 32000000000,
}
}
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(1))
require.NoError(t, beaconState.SetValidators(validators))
s := &Service{}
att := util.NewAttestation()
att.Data.Slot = 1
attestations := []ethpb.Att{att}
result := s.batchVerifyAttestationSignatures(context.Background(), attestations, beaconState)
require.NotNil(t, result)
if len(result) == 0 && len(hook.Entries) > 0 {
// If nothing verified, the fallback path may have been taken; either outcome is
// acceptable here, so only log whether the fallback message was observed.
for _, entry := range hook.Entries {
if entry.Message == "batch verification failed, using individual checks" {
t.Log("batch verification fell back to individual checks")
break
}
}
}
})
t.Run("BatchVerificationFailureFallbackToIndividual", func(t *testing.T) {
hook := logTest.NewGlobal()
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(1))
chainService := &mockChain.ChainService{
State: beaconState,
ValidAttestation: false, // This will cause verification to fail
}
s := &Service{
cfg: &config{
chain: chainService,
},
}
att := util.NewAttestation()
att.Data.Slot = 1
attestations := []ethpb.Att{att}
result := s.batchVerifyAttestationSignatures(context.Background(), attestations, beaconState)
require.Equal(t, 0, len(result))
require.NotEqual(t, 0, len(hook.Entries), "Should have log entries")
found := false
for _, entry := range hook.Entries {
if entry.Message == "batch verification failed, using individual checks" {
found = true
break
}
}
require.Equal(t, true, found, "Should log fallback message")
})
}

View File

@@ -53,16 +53,47 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
p1.Peers().SetChainState(p2.PeerID(), &ethpb.StatusV2{})
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{}}
// Create and save block 'A' to DB
blockA := util.NewBeaconBlock()
util.SaveBlock(t, t.Context(), db, blockA)
rootA, err := blockA.Block.HashTreeRoot()
require.NoError(t, err)
// Save state for block 'A'
stateA, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, db.SaveState(t.Context(), stateA, rootA))
// Setup chain service with block 'A' in forkchoice
chain := &mock.ChainService{
Genesis: prysmTime.Now(),
FinalizedCheckPoint: &ethpb.Checkpoint{},
// NotFinalized: false means InForkchoice returns true
}
r := &Service{
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
blkRootToPendingAtts: make(map[[32]byte][]any),
seenPendingBlocks: make(map[[32]byte]bool),
chainStarted: abool.New(),
}
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []any{a}
require.NoError(t, r.processPendingAtts(t.Context()))
// Add pending attestations for OTHER block roots (not block A)
// These are blocks we don't have yet, so they should be requested
attB := &ethpb.Attestation{Data: &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte{'B'}, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
}}
attC := &ethpb.Attestation{Data: &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte{'C'}, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
}}
r.blkRootToPendingAtts[[32]byte{'B'}] = []any{attB}
r.blkRootToPendingAtts[[32]byte{'C'}] = []any{attC}
// Process block A (which exists and has no pending attestations)
// This should skip processing attestations for A and request blocks B and C
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
require.LogsContain(t, hook, "Requesting block by root")
}
@@ -141,7 +172,7 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{att}
require.NoError(t, r.processPendingAtts(t.Context()))
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
var wg sync.WaitGroup
wg.Add(1)
@@ -235,7 +266,7 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAttElectra(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{att}
require.NoError(t, r.processPendingAtts(t.Context()))
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
var wg sync.WaitGroup
wg.Add(1)
go func() {
@@ -360,7 +391,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
r.blkRootToPendingAtts[root] = []any{
att,
}
require.NoError(t, r.processPendingAtts(t.Context()))
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
// Verify that the event feed receives the expected attestation.
var wg sync.WaitGroup
@@ -471,7 +502,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
}
s.blkRootToPendingAtts[r32] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
require.NoError(t, s.processPendingAtts(t.Context()))
require.NoError(t, s.processPendingAttsForBlock(t.Context(), r32))
assert.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcasted bad aggregate")
@@ -510,7 +541,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
require.NoError(t, err)
s.blkRootToPendingAtts[r32] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, s.processPendingAtts(t.Context()))
require.NoError(t, s.processPendingAttsForBlock(t.Context(), r32))
assert.Equal(t, true, p2p.BroadcastCalled.Load(), "The good aggregate was not broadcasted")
@@ -601,7 +632,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(t.Context()))
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
@@ -696,7 +727,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAttElectra(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProofElectra{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAtts(t.Context()))
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
@@ -780,8 +811,8 @@ func TestProcessPendingAtts_BlockNotInForkChoice(t *testing.T) {
// Add pending attestation
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}}
// Process pending attestations - should not process because block is not in fork choice
require.NoError(t, r.processPendingAtts(t.Context()))
// Process pending attestations - should return error because block is not in fork choice
require.ErrorContains(t, "could not process unknown block root", r.processPendingAttsForBlock(t.Context(), root))
// Verify attestations were not processed (should still be pending)
assert.Equal(t, 1, len(r.blkRootToPendingAtts[root]), "Attestations should still be pending")
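The last hunk also tightens the contract: a root that fork choice does not know about now surfaces as an error instead of being skipped silently, and the attestations stay queued for a later attempt. A hedged sketch of that gate, with a stand-in fork-choice interface (all names assumed, not Prysm's):

package main

import "fmt"

// forkChoicer is a stand-in for the fork-choice store; HasNode reports whether
// a block root has already been inserted.
type forkChoicer interface {
	HasNode(root [32]byte) bool
}

type memForkChoice map[[32]byte]bool

func (m memForkChoice) HasNode(root [32]byte) bool { return m[root] }

// processPendingForBlock refuses to touch attestations whose target block is
// not part of fork choice yet; the caller keeps them pending and retries later.
func processPendingForBlock(fc forkChoicer, root [32]byte, pending []string) error {
	if !fc.HasNode(root) {
		return fmt.Errorf("could not process unknown block root %#x", root)
	}
	// ... verify and import the batch here ...
	return nil
}

func main() {
	fc := memForkChoice{}
	fmt.Println(processPendingForBlock(fc, [32]byte{0xaa}, []string{"att"}))
}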

View File

@@ -11,11 +11,9 @@ import (
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/pkg/errors"
@@ -122,41 +120,9 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
return nil, errors.Errorf("RPC handler not found for fork index %d", forkIndex)
}
// rpcHandlerByTopic returns the RPC handlers for a given epoch.
func (s *Service) rpcHandlerByTopicFromEpoch(epoch primitives.Epoch) (map[string]rpcHandler, error) {
// Get the beacon config.
beaconConfig := params.BeaconConfig()
if epoch >= beaconConfig.FuluForkEpoch {
return s.rpcHandlerByTopicFromFork(version.Fulu)
}
if epoch >= beaconConfig.ElectraForkEpoch {
return s.rpcHandlerByTopicFromFork(version.Electra)
}
if epoch >= beaconConfig.DenebForkEpoch {
return s.rpcHandlerByTopicFromFork(version.Deneb)
}
if epoch >= beaconConfig.CapellaForkEpoch {
return s.rpcHandlerByTopicFromFork(version.Capella)
}
if epoch >= beaconConfig.BellatrixForkEpoch {
return s.rpcHandlerByTopicFromFork(version.Bellatrix)
}
if epoch >= beaconConfig.AltairForkEpoch {
return s.rpcHandlerByTopicFromFork(version.Altair)
}
return s.rpcHandlerByTopicFromFork(version.Phase0)
}
// addedRPCHandlerByTopic returns the RPC handlers that are added in the new map that are not present in the old map.
func addedRPCHandlerByTopic(previous, next map[string]rpcHandler) map[string]rpcHandler {
added := make(map[string]rpcHandler)
added := make(map[string]rpcHandler, len(next))
for topic, handler := range next {
if _, ok := previous[topic]; !ok {
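addedRPCHandlerByTopic is a plain set difference over the two handler maps, so only the topics introduced by the new fork get registered; the only change in this hunk is the capacity hint on the result map. The same pattern in a self-contained form, with the handler type reduced to a bare func and illustrative topic strings:

package main

import "fmt"

type rpcHandler func()

// addedByTopic returns the handlers present in next but absent from previous,
// i.e. the topics a node must newly register when crossing a fork boundary.
func addedByTopic(previous, next map[string]rpcHandler) map[string]rpcHandler {
	added := make(map[string]rpcHandler, len(next))
	for topic, handler := range next {
		if _, ok := previous[topic]; !ok {
			added[topic] = handler
		}
	}
	return added
}

func main() {
	prev := map[string]rpcHandler{"/eth2/beacon_chain/req/status/1": func() {}}
	next := map[string]rpcHandler{
		"/eth2/beacon_chain/req/status/1": func() {},
		"/eth2/beacon_chain/req/status/2": func() {},
	}
	for topic := range addedByTopic(prev, next) {
		fmt.Println("register:", topic)
	}
}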
@@ -181,13 +147,12 @@ func removedRPCTopics(previous, next map[string]rpcHandler) map[string]bool {
}
// registerRPCHandlers for p2p RPC.
func (s *Service) registerRPCHandlers() error {
// Get the current epoch.
currentSlot := s.cfg.clock.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)
func (s *Service) registerRPCHandlers(nse params.NetworkScheduleEntry) error {
if s.digestActionDone(nse.ForkDigest, registerRpcOnce) {
return nil
}
// Get the RPC handlers for the current epoch.
handlerByTopic, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
handlerByTopic, err := s.rpcHandlerByTopicFromFork(nse.VersionEnum)
if err != nil {
return errors.Wrap(err, "rpc handler by topic from fork")
}
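registerRPCHandlers is now driven by a params.NetworkScheduleEntry and returns early once the same fork digest has already been handled, which is what makes repeated registration calls safe after init sync. A minimal sketch of that kind of guard, assuming digestActionDone marks-and-reports in one step (the surrounding types are invented for illustration):

package main

import (
	"fmt"
	"sync"
)

type digestAction struct {
	digest [4]byte
	action string
}

type registrar struct {
	mu   sync.Mutex
	done map[digestAction]bool
}

// digestActionDone records that an action ran for a fork digest and reports
// whether it had already run, so repeated calls become no-ops.
func (r *registrar) digestActionDone(digest [4]byte, action string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	key := digestAction{digest, action}
	if r.done[key] {
		return true
	}
	r.done[key] = true
	return false
}

func (r *registrar) registerRPCHandlers(digest [4]byte) error {
	if r.digestActionDone(digest, "register-rpc") {
		return nil // already registered for this fork digest
	}
	fmt.Printf("registering RPC handlers for digest %#x\n", digest)
	return nil
}

func main() {
	r := &registrar{done: make(map[digestAction]bool)}
	_ = r.registerRPCHandlers([4]byte{0x01, 0x02, 0x03, 0x04})
	_ = r.registerRPCHandlers([4]byte{0x01, 0x02, 0x03, 0x04}) // no-op on the second call
}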

View File

@@ -854,7 +854,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) {
code, _, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
if err != nil && err != io.EOF {
if err != nil && !errors.Is(err, io.EOF) {
t.Fatal(err)
}
if code != 0 || errors.Is(err, io.EOF) {
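The test now matches io.EOF with errors.Is instead of a direct comparison, so an EOF that arrives wrapped by another error still short-circuits cleanly. A quick illustration of why the two checks differ:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("read status code: %w", io.EOF)

	// A direct comparison misses the wrapped sentinel...
	fmt.Println(wrapped == io.EOF) // false

	// ...while errors.Is walks the wrap chain and finds it.
	fmt.Println(errors.Is(wrapped, io.EOF)) // true
}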

View File

@@ -93,11 +93,15 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
// requestAndSaveMissingDataColumnSidecars checks whether data column sidecars are missing for the given blocks.
// If so, it requests them and saves them to storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
if len(blks) == 0 {
return nil
}
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "fetch custody group count from peer")
return errors.Wrap(err, "custody group count")
}
samplingSize := max(custodyGroupCount, samplesPerSlot)
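Besides the early return for an empty block list and threading s.ctx into CustodyGroupCount, the handler derives its sampling size as the larger of the node's custody group count and the configured samples per slot, via Go's builtin max. A toy version of that calculation (the numbers are placeholders, not mainnet config):

package main

import "fmt"

// samplingSize returns how many data column groups a node should fetch per slot:
// at least the protocol's sampling floor, and never fewer than it custodies.
func samplingSize(custodyGroupCount, samplesPerSlot uint64) uint64 {
	return max(custodyGroupCount, samplesPerSlot) // builtin max, Go 1.21+
}

func main() {
	fmt.Println(samplingSize(4, 8))  // 8: the sampling floor dominates
	fmt.Println(samplingSize(64, 8)) // 64: large custody dominates
}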

View File

@@ -415,6 +415,7 @@ func TestRequestPendingBlobs(t *testing.T) {
}
func TestConstructPendingBlobsRequest(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
d := db.SetupDB(t)
bs := filesystem.NewEphemeralBlobStorage(t)
s := &Service{cfg: &config{beaconDB: d, blobStorage: bs}}
@@ -436,6 +437,7 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
ParentRoot: bytesutil.PadTo([]byte{}, 32),
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
Slot: ds,
},
Signature: bytesutil.PadTo([]byte{}, 96),
}
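The fixture now pins the block header's slot to the Deneb fork epoch so the blob-request construction path is actually exercised; util.SlotAtEpoch presumably reduces to the usual epoch-to-slot arithmetic. A hedged sketch of that conversion (32 slots per epoch is the mainnet constant, hard-coded here instead of read from params.BeaconConfig()):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH; real code reads it from the beacon config

// slotAtEpoch returns the first slot of the given epoch.
func slotAtEpoch(epoch uint64) uint64 {
	return epoch * slotsPerEpoch
}

func main() {
	const denebForkEpoch = 269568 // mainnet Deneb fork epoch, for illustration
	fmt.Println(slotAtEpoch(denebForkEpoch)) // 8626176
}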

Some files were not shown because too many files have changed in this diff.