Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: agent-stri... → stream-slo...

29 commits (SHA1):
60cfd6a5a6, 38955fd08c, 71f05b597f, 0d742c6f88, 06b5409ff0, 9805e90d73, 537f3cb863, b45e87abd6, 4c4b12cca7, aabded250f, 4f9e56fc70, 2a86132994, 74c47e25a9, 28eb1a4c3c, 1f89394727, bf1095c782, b24fe0d23a, cbe50269de, 4ed2953fcf, 915837d059, 26b276660f, 580509f2f4, be144da099, cc2565a422, d86353ea9d, 45d6002411, 08c855fd4b, 7c86b5d737, 023287f7df
@@ -59,6 +59,7 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
+"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
@@ -12,6 +12,7 @@ import (

"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
+"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -170,8 +171,11 @@ func TestClient_RegisterValidator(t *testing.T) {

func TestClient_GetHeader(t *testing.T) {
ctx := t.Context()
-expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
-var slot primitives.Slot = 23
+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
+expectedPath := "/eth/v1/builder/header/%d/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
+expectedPath = fmt.Sprintf(expectedPath, ds)
+var slot primitives.Slot = ds
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
@@ -533,7 +537,7 @@ func TestClient_GetHeader(t *testing.T) {
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseElectra{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
-pro, err := epr.ToProto(100)
+pro, err := epr.ToProto(es)
require.NoError(t, err)
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -108,7 +107,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
-dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
+dep := max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")
@@ -135,7 +134,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
-Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
+Depth: max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
@@ -109,6 +109,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
}

// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
+// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
@@ -712,7 +712,7 @@ func (s *Service) areDataColumnsAvailable(
nodeID := s.cfg.P2P.NodeID()

// Get the custody group sampling size for the node.
-custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
+custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}
@@ -2413,6 +2413,8 @@ func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
}

func TestMissingBlobIndices(t *testing.T) {
+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
expected [][]byte
@@ -2426,23 +2428,23 @@ func TestMissingBlobIndices(t *testing.T) {
},
{
name: "expected exceeds max",
-expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
+expected: fakeCommitments(maxBlobs + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
-expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
+expected: fakeCommitments(maxBlobs),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
-expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
+expected: fakeCommitments(maxBlobs),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
-expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
+expected: fakeCommitments(maxBlobs),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},
@@ -2475,8 +2477,8 @@ func TestMissingBlobIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
-require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
-missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
+require.NoError(t, bm.CreateFakeIndices(c.root, ds, c.present...))
+missing, err := missingBlobIndices(bs, c.root, c.expected, ds)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
@@ -2904,22 +2906,21 @@ type testIsAvailableParams struct {
columnsToSave []uint64
}

-func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
+func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(t.Context())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

-options := append(params.options, WithDataColumnStorage(dataColumnStorage))
+options := append(p.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
+fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

-genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
-
-err := service.saveGenesisData(ctx, genesisState)
-require.NoError(t, err)
+genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32, util.WithElectraStateSlot(fs))
+require.NoError(t, service.saveGenesisData(ctx, genesisState))

conf := util.DefaultBlockGenConfig()
-conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
+conf.NumBlobKzgCommitments = p.blobKzgCommitmentsCount

-signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
+signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, fs+1)
require.NoError(t, err)

block := signedBeaconBlock.Block
@@ -2929,8 +2930,8 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
root, err := block.HashTreeRoot()
require.NoError(t, err)

-dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
-for _, i := range params.columnsToSave {
+dataColumnsParams := make([]util.DataColumnParam, 0, len(p.columnsToSave))
+for _, i := range p.columnsToSave {
dataColumnParam := util.DataColumnParam{
Index: i,
Slot: block.Slot,
@@ -2954,8 +2955,12 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
}

func TestIsDataAvailable(t *testing.T) {
+params.SetupTestConfigCleanup(t)
+cfg := params.BeaconConfig()
+cfg.AltairForkEpoch, cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch, cfg.ElectraForkEpoch, cfg.FuluForkEpoch = 0, 0, 0, 0, 0, 0
+params.OverrideBeaconConfig(cfg)
t.Run("Fulu - out of retention window", func(t *testing.T) {
-params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
+params := testIsAvailableParams{}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)

roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
@@ -2972,7 +2977,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})

t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
@@ -562,8 +562,9 @@ func TestNotifyIndex(t *testing.T) {
var root [32]byte
copy(root[:], "exampleRoot")

+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Test notifying a new index
-bn.notifyIndex(root, 1, 1)
+bn.notifyIndex(root, 1, ds)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}
@@ -580,7 +581,7 @@ func TestNotifyIndex(t *testing.T) {
}

// Test notifying a new index again
-bn.notifyIndex(root, 2, 1)
+bn.notifyIndex(root, 2, ds)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}

-func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}
@@ -106,14 +106,14 @@ type mockCustodyManager struct {
custodyGroupCount uint64
}

-func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
+func (dch *mockCustodyManager) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()

return dch.earliestAvailableSlot, nil
}

-func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
+func (dch *mockCustodyManager) CustodyGroupCount(context.Context) (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
beacon-chain/cache/committee.go (vendored, 3)
@@ -5,7 +5,6 @@ package cache
import (
"context"
"errors"
-"math"
"sync"
"time"

@@ -272,7 +271,7 @@ func (c *CommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) err
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
-delay = math.Min(delay, maxDelay)
+delay = min(delay, maxDelay)
}
return nil
}
@@ -52,7 +52,7 @@ func create(leaves [][32]byte, depth uint64) MerkleTreeNode {
if depth == 0 {
return &LeafNode{hash: leaves[0]}
}
-split := math.Min(math.PowerOf2(depth-1), length)
+split := min(math.PowerOf2(depth-1), length)
left := create(leaves[0:split], depth-1)
right := create(leaves[split:], depth-1)
return &InnerNode{left: left, right: right}
beacon-chain/cache/skip_slot_cache.go (vendored, 3)
@@ -2,7 +2,6 @@ package cache

import (
"context"
-"math"
"sync"
"time"

@@ -90,7 +89,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
-delay = math.Min(delay, maxDelay)
+delay = min(delay, maxDelay)
}
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))
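A pattern that repeats across the cache, merkle-tree, sync-committee, slashing and validator hunks in this compare: calls to Prysm's math.Min/math.Max helpers (and the mathutil alias) on unsigned integers are replaced by Go's built-in generic min and max, available since Go 1.21, which lets each file drop its math import. A minimal, self-contained sketch of the before/after shape; maxUint64 below is a stand-in helper for illustration, not Prysm's actual math package:

```go
package main

import "fmt"

// Before (pre-Go 1.21 style): a package-level helper was needed for each type.
func maxUint64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	var headSlot, newHeadSlot, forkSlot uint64 = 12, 15, 10

	// Old style: a helper package supplied Max for uint64 values.
	depOld := maxUint64(headSlot-forkSlot, newHeadSlot-forkSlot)

	// New style (Go 1.21+): the built-in generic max works for any ordered type,
	// so the extra import and helper go away.
	depNew := max(headSlot-forkSlot, newHeadSlot-forkSlot)

	fmt.Println(depOld, depNew) // 5 5
}
```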
@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -209,7 +208,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}

cfg := params.BeaconConfig()
-modulo := math.Max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
+modulo := max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
hashedSig := hash.Hash(sig)
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
}
@@ -39,7 +39,6 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
-"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -42,6 +42,9 @@ func ProcessAttesterSlashings(
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
+if exitInfo == nil && len(slashings) > 0 {
+return nil, errors.New("exit info required to process attester slashings")
+}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
@@ -59,6 +62,9 @@ func ProcessAttesterSlashing(
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
+if exitInfo == nil {
+return nil, errors.New("exit info is required to process attester slashing")
+}
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
}
@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -34,7 +33,7 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
if err != nil {
return nil, err
}
-validator.EffectiveBalance = math.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
+validator.EffectiveBalance = min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
if validator.EffectiveBalance ==
params.BeaconConfig().MaxEffectiveBalance {
validator.ActivationEligibilityEpoch = 0
@@ -55,6 +55,9 @@ func ProcessVoluntaryExits(
if len(exits) == 0 {
return beaconState, nil
}
+if exitInfo == nil {
+return nil, errors.New("exit info required to process voluntary exits")
+}
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -51,6 +51,9 @@ func ProcessProposerSlashings(
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
+if exitInfo == nil && len(slashings) > 0 {
+return nil, errors.New("exit info required to process proposer slashings")
+}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
@@ -75,6 +78,9 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
+if exitInfo == nil {
+return nil, errors.New("exit info is required to process proposer slashing")
+}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
@@ -53,9 +53,15 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
-exitInfo := v.ExitInformation(st)
-if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
-return nil, errors.Wrap(err, "could not update total active balance cache")
+var exitInfo *v.ExitInfo
+hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
+hasExits := len(bb.VoluntaryExits()) > 0
+if hasSlashings || hasExits {
+// ExitInformation is expensive to compute, only do it if we need it.
+exitInfo = v.ExitInformation(st)
+if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
+return nil, errors.Wrap(err, "could not update total active balance cache")
+}
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -91,6 +92,18 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
+if len(wrs) == 0 {
+return st, nil
+}
+// It is correct to compute exitInfo once for all withdrawals in the block, as the ExitInfo pointer is
+// updated within InitiateValidatorExit which is the only function that uses it.
+var exitInfo *validators.ExitInfo
+if st.Version() < version.Electra {
+exitInfo = validators.ExitInformation(st)
+} else {
+// After Electra, the function InitiateValidatorExit ignores the exitInfo passed to it and recomputes it anyway.
+exitInfo = &validators.ExitInfo{}
+}
for _, wr := range wrs {
if wr == nil {
return nil, errors.New("nil execution layer withdrawal request")
@@ -148,7 +161,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
var err error
-st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
+// exitInfo is updated within InitiateValidatorExit
+st, err = validators.InitiateValidatorExit(ctx, st, vIdx, exitInfo)
if err != nil {
return nil, err
}
@@ -96,12 +96,17 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
}

// Process validators eligible for ejection.
-for _, idx := range eligibleForEjection {
-// Here is fine to do a quadratic loop since this should
-// barely happen
-st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
-if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
-return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
+if len(eligibleForEjection) > 0 {
+// It is safe to compute exitInfo once for all ejections in the epoch, as the ExitInfo pointer is
+// updated within InitiateValidatorExit which is the only function that uses it.
+exitInfo := validators.ExitInformation(st)
+for _, idx := range eligibleForEjection {
+// Here is fine to do a quadratic loop since this should
+// barely happen
+st, err = validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
+if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
+return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
+}
}
}
@@ -228,7 +233,7 @@ func ProcessSlashings(st state.BeaconState) error {
// a callback is used here to apply the following actions to all validators
// below equally.
increment := params.BeaconConfig().EffectiveBalanceIncrement
-minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)
+minSlashing := min(totalSlashing*slashingMultiplier, totalBalance)

// Modified in Electra:EIP7251
var penaltyPerEffectiveBalanceIncrement uint64
@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
-"github.com/OffchainLabs/prysm/v6/math"
)

// ProcessSlashingsPrecompute processes the slashed validators during epoch processing.
@@ -21,7 +20,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
totalSlashing += slashing
}

-minSlashing := math.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
+minSlashing := min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
epochToWithdraw := currentEpoch + exitLength/2

var hasSlashing bool
@@ -399,7 +399,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
defer span.End()

// Verify if the epoch is valid for assignment based on the provided state.
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
@@ -407,12 +406,15 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
if err != nil {
return nil, err
}
-vals := make(map[primitives.ValidatorIndex]struct{})

+// Deduplicate and make set for O(1) membership checks.
+vals := make(map[primitives.ValidatorIndex]struct{}, len(validators))
for _, v := range validators {
vals[v] = struct{}{}
}
-assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
// Compute committee assignments for each slot in the epoch.
+remaining := len(vals)
+
+assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment, len(vals))
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
committees, err := BeaconCommittees(ctx, state, slot)
if err != nil {
@@ -420,7 +422,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
}
for j, committee := range committees {
for _, vIndex := range committee {
-if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
+if _, ok := vals[vIndex]; !ok {
continue
}
if _, ok := assignments[vIndex]; !ok {
@@ -429,6 +431,11 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
+delete(vals, vIndex)
+remaining--
+if remaining == 0 {
+return assignments, nil // early exit
+}
}
}
}
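The CommitteeAssignments hunks above deduplicate the requested validator indices into a set, track how many are still unassigned, and stop scanning committees as soon as every requested validator has been found. A rough sketch of that early-exit shape, with simplified placeholder types rather than the real Prysm signatures:

```go
package main

import "fmt"

// assignFirstMatch records the first slot at which each requested validator
// index appears in any committee, returning early once all are found.
func assignFirstMatch(requested []uint64, committeesBySlot [][][]uint64) map[uint64]uint64 {
	// Deduplicate and build a set for O(1) membership checks.
	wanted := make(map[uint64]struct{}, len(requested))
	for _, v := range requested {
		wanted[v] = struct{}{}
	}
	remaining := len(wanted)

	assignments := make(map[uint64]uint64, len(wanted))
	for slot, committees := range committeesBySlot {
		for _, committee := range committees {
			for _, idx := range committee {
				if _, ok := wanted[idx]; !ok {
					continue
				}
				assignments[idx] = uint64(slot)
				delete(wanted, idx)
				remaining--
				if remaining == 0 {
					return assignments // early exit: everyone is assigned
				}
			}
		}
	}
	return assignments
}

func main() {
	committees := [][][]uint64{{{1, 2}, {3}}, {{4, 5}}}
	fmt.Println(assignFirstMatch([]uint64{3, 5, 3}, committees)) // map[3:0 5:1]
}
```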
@@ -79,7 +79,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
}

// Spec defines `EffectiveBalanceIncrement` as min to avoid divisions by zero.
-total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
+total = max(params.BeaconConfig().EffectiveBalanceIncrement, total)
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
return 0, err
}
@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-"github.com/OffchainLabs/prysm/v6/math"
v1alpha1 "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
)
@@ -95,7 +94,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
if T*(200+3*D) < t*(200+12*D) {
epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
epochsForBalanceTopUps := N * (200 + 3*D) / (600 * Delta)
-wsp += math.Max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
+wsp += max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
} else {
wsp += 3 * N * D * t / (200 * Delta * (T - t))
}
@@ -1,6 +1,8 @@
package peerdas

import (
+"sort"
+
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -28,7 +30,8 @@ func MinimumColumnCountToReconstruct() uint64 {

// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
-// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
+// `inVerifiedRoSidecars` should contain enough sidecars to reconstruct the missing columns, and should not contain any duplicate.
+// WARNING: This function sorts inplace `verifiedRoSidecars` by index.
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
// Check if there is at least one input sidecar.
if len(verifiedRoSidecars) == 0 {
@@ -51,18 +54,17 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
}
}

-// Deduplicate sidecars.
-sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
-for _, inVerifiedRoSidecar := range verifiedRoSidecars {
-sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
-}
-
// Check if there is enough sidecars to reconstruct the missing columns.
-sidecarCount := len(sidecarByIndex)
+sidecarCount := len(verifiedRoSidecars)
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}

+// Sort the input sidecars by index.
+sort.Slice(verifiedRoSidecars, func(i, j int) bool {
+return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
+})
+
// Recover cells and compute proofs in parallel.
var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -71,10 +73,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, sidecarCount)

-for columnIndex, sidecar := range sidecarByIndex {
+for _, sidecar := range verifiedRoSidecars {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
-cellsIndices = append(cellsIndices, columnIndex)
+cellsIndices = append(cellsIndices, sidecar.Index)
}

// Recover the cells and proofs for the corresponding blob
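The ReconstructDataColumnSidecars change above drops the map-based deduplication (callers are now expected to pass unique sidecars) and instead sorts the input slice in place by column index before feeding cells and their indices to the KZG recovery, hence the new WARNING in the doc comment. The ordering step is plain standard library; a tiny self-contained sketch with a placeholder sidecar type:

```go
package main

import (
	"fmt"
	"sort"
)

// sidecar is a simplified placeholder carrying only the column Index field.
type sidecar struct{ Index uint64 }

func main() {
	sidecars := []sidecar{{Index: 5}, {Index: 1}, {Index: 3}}
	// In-place sort by column index, mirroring the new behaviour: callers can
	// no longer rely on the input order being preserved after the call.
	sort.Slice(sidecars, func(i, j int) bool {
		return sidecars[i].Index < sidecars[j].Index
	})
	fmt.Println(sidecars) // [{1} {3} {5}]
}
```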
@@ -125,11 +125,12 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
}

func TestReconstructBlobs(t *testing.T) {
-// Start the trusted setup.
-err := kzg.Start()
-require.NoError(t, err)
+params.SetupTestConfigCleanup(t)
+params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
+
+require.NoError(t, kzg.Start())
var emptyBlock blocks.ROBlock
+fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

t.Run("no index", func(t *testing.T) {
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
@@ -190,10 +191,10 @@ func TestReconstructBlobs(t *testing.T) {
})

t.Run("not committed to the same block", func(t *testing.T) {
-_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
-roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
+_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
+roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))

-_, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
+_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
})
@@ -16,61 +16,60 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

-params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
+fs := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
+fuluMax := params.BeaconConfig().MaxBlobsPerBlock(fs)
t.Run("pre fulu", func(t *testing.T) {
-block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
+block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, 0)
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.NoError(t, err)
})

-t.Run("too many commitmnets", func(t *testing.T) {
-params.SetupTestConfigCleanup(t)
-config := params.BeaconConfig()
-config.BlobSchedule = []params.BlobScheduleEntry{{}}
-params.OverrideBeaconConfig(config)
-
-block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
+t.Run("too many commitments", func(t *testing.T) {
+block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, fuluMax+1, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
})

t.Run("root mismatch", func(t *testing.T) {
-_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
-block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
+_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
+block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrRootMismatch)
})

t.Run("column size mismatch", func(t *testing.T) {
-block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("KZG commitments size mismatch", func(t *testing.T) {
-block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("KZG proofs mismatch", func(t *testing.T) {
-block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("commitment mismatch", func(t *testing.T) {
-block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
-_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
+_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})

t.Run("nominal", func(t *testing.T) {
-block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.NoError(t, err)
})
@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
+"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -378,9 +379,16 @@ func ProcessBlockForStateRoot(
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

-exitInfo := v.ExitInformation(st)
-if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
-return nil, errors.Wrap(err, "could not update total active balance cache")
+hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
+// exitInfo is only needed for voluntary exits pre Electra.
+hasExits := st.Version() < version.Electra && len(beaconBlock.Body().VoluntaryExits()) > 0
+exitInfo := &validators.ExitInfo{}
+if hasSlashings || hasExits {
+// ExitInformation is expensive to compute, only do it if we need it.
+exitInfo = v.ExitInformation(st)
+if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
+return nil, errors.Wrap(err, "could not update total active balance cache")
+}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
@@ -407,10 +415,15 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
// This calls phase 0 block operations.
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

-exitInfo := v.ExitInformation(st)
-if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
-return nil, errors.Wrap(err, "could not update total active balance cache")
+hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
+hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
+var exitInfo *v.ExitInfo
+if hasSlashings || hasExits {
+// ExitInformation is expensive to compute, only do it if we need it.
+exitInfo = v.ExitInformation(st)
+if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
+return nil, errors.Wrap(err, "could not update total active balance cache")
+}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
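altairOperations, phase0Operations and the Electra ProcessOperations hunks all introduce the same guard: ExitInformation is only computed when the block actually contains proposer slashings, attester slashings or (where relevant) voluntary exits, because it walks the validator registry and is expensive. A condensed, self-contained sketch of the pattern; the types and helpers below are hypothetical stand-ins, not the real Prysm code:

```go
package main

import "fmt"

// exitInfo and body are simplified placeholders for the real Prysm types.
type exitInfo struct{ totalActiveBalance uint64 }

type body struct {
	proposerSlashings []int
	attesterSlashings []int
	voluntaryExits    []int
}

// computeExitInfo stands in for validators.ExitInformation, which scans the
// whole validator registry and is therefore relatively expensive.
func computeExitInfo() *exitInfo {
	fmt.Println("computing exit info")
	return &exitInfo{totalActiveBalance: 32_000_000_000}
}

func processOperations(b body) *exitInfo {
	hasSlashings := len(b.proposerSlashings) > 0 || len(b.attesterSlashings) > 0
	hasExits := len(b.voluntaryExits) > 0

	var info *exitInfo
	if hasSlashings || hasExits {
		// Only pay the cost when an operation below will actually consume it;
		// the downstream processors now reject a nil exit info when they need one.
		info = computeExitInfo()
	}
	return info
}

func main() {
	processOperations(body{})                         // no slashings or exits: nothing computed
	processOperations(body{voluntaryExits: []int{1}}) // exits present: computed once
}
```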
@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
-mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -60,7 +59,7 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
_ = err

// Apply minimum balance as per spec
-exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
+exitInfo.TotalActiveBalance = max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}

@@ -98,7 +97,9 @@ func InitiateValidatorExit(
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}

+if exitInfo == nil {
+return nil, errors.New("exit info is required to process validator exit")
+}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
@@ -177,6 +178,9 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
+if exitInfo == nil {
+return errors.New("exit info is required to process validator exit")
+}
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
@@ -235,7 +239,9 @@ func SlashValidator(
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error

+if exitInfo == nil {
+return nil, errors.New("exit info is required to slash validator")
+}
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
@@ -18,13 +18,16 @@ import (
)

func Test_commitmentsToCheck(t *testing.T) {
+params.SetupTestConfigCleanup(t)
+params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
+fulu := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
-commits := [][]byte{
-bytesutil.PadTo([]byte("a"), 48),
-bytesutil.PadTo([]byte("b"), 48),
-bytesutil.PadTo([]byte("c"), 48),
-bytesutil.PadTo([]byte("d"), 48),
+windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
+maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
+commits := make([][]byte, maxBlobs+1)
+for i := 0; i < len(commits); i++ {
+commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
+}
cases := []struct {
name string
@@ -47,41 +50,40 @@ func Test_commitmentsToCheck(t *testing.T) {
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
-d := util.NewBeaconBlockDeneb()
-d.Block.Body.BlobKzgCommitments = commits
-d.Block.Slot = 100
+d := util.NewBeaconBlockFulu()
+d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
+d.Block.Slot = fulu + 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
-commits: commits,
-slot: 100,
+commits: commits[:maxBlobs],
+slot: fulu + 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
-d := util.NewBeaconBlockDeneb()
+d := util.NewBeaconBlockFulu()
+d.Block.Slot = fulu
// block is from slot 0, "current slot" is window size +1 (so outside the window)
-d.Block.Body.BlobKzgCommitments = commits
+d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
-slot: windowSlots + 1,
+slot: fulu + windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
-d := util.NewBeaconBlockDeneb()
-d.Block.Slot = 100
+d := util.NewBeaconBlockFulu()
+d.Block.Slot = fulu + 100
// block is from slot 0, "current slot" is window size +1 (so outside the window)
-d.Block.Body.BlobKzgCommitments = commits
+// Double the number of commitments, assert that this is over the limit
+d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
@@ -115,67 +117,69 @@ func Test_commitmentsToCheck(t *testing.T) {
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

-blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
+blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
-require.NoError(t, as.Persist(1, blobSidecars[2]))
-err := as.IsDataAvailable(ctx, 1, blk)
+require.NoError(t, as.Persist(ds, blobSidecars[2]))
+err := as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All but one persisted, return missing idx
-require.NoError(t, as.Persist(1, blobSidecars[0]))
-err = as.IsDataAvailable(ctx, 1, blk)
+require.NoError(t, as.Persist(ds, blobSidecars[0]))
+err = as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All persisted, return nil
-require.NoError(t, as.Persist(1, blobSidecars...))
+require.NoError(t, as.Persist(ds, blobSidecars...))

-require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
+require.NoError(t, as.IsDataAvailable(ctx, ds, blk))
}

func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

-blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
+blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
-require.NoError(t, as.Persist(1, blobSidecars[0]))
-err := as.IsDataAvailable(ctx, 1, blk)
+require.NoError(t, as.Persist(ds, blobSidecars[0]))
+err := as.IsDataAvailable(ctx, ds, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}

func TestLazyPersistOnceCommitted(t *testing.T) {
-_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
-require.NoError(t, as.Persist(1, blobSidecars...))
+require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
-require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
+require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
-require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
-_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
+require.ErrorIs(t, as.Persist(ds, blobSidecars[0]), errIndexOutOfBounds)
+_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 4)

// ignores sidecars before the retention period
-slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
-require.NoError(t, err)
-require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
+slotOOB := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
+slotOOB += ds + 32
+require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
-require.NoError(t, as.Persist(1, moreBlobSidecars...))
+require.NoError(t, as.Persist(ds, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {
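Much of the remaining test churn is one substitution: hard-coded slots such as 0, 1 or 100 become slots derived from the relevant fork epoch via util.SlotAtEpoch, so blob and data-column limits like MaxBlobsPerBlock are evaluated at the fork the test targets. Assuming util.SlotAtEpoch is essentially an epoch-to-first-slot conversion (an assumption, not confirmed by this diff), the underlying arithmetic is just:

```go
package main

import "fmt"

// Assumed mainnet constant; the real code reads it from params.BeaconConfig().
const slotsPerEpoch = 32

// slotAtEpoch mirrors what a helper like util.SlotAtEpoch is assumed to do:
// return the first slot of the given epoch.
func slotAtEpoch(epoch uint64) uint64 {
	return epoch * slotsPerEpoch
}

func main() {
	denebForkEpoch := uint64(10) // placeholder epoch purely for the example
	fmt.Println(slotAtEpoch(denebForkEpoch)) // 320: a slot unambiguously at/after the fork
}
```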
@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry := &blobCacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
-sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
+sumz := filesystem.NewMockBlobStorageSummarizer(t, slots.ToEpoch(slot), od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
@@ -21,7 +21,8 @@ import (
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
-_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
+ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)

t.Run("no error for duplicate", func(t *testing.T) {
@@ -127,21 +128,22 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}

func TestBlobIndicesBounds(t *testing.T) {
+es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fs := afero.NewMemMapFs()
root := [32]byte{}

-okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
-writeFakeSSZ(t, fs, root, 0, okIdx)
+okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es)) - 1
+writeFakeSSZ(t, fs, root, es, okIdx)
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
indices := bs.Summary(root).mask
-expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
+expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}

-oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
-writeFakeSSZ(t, fs, root, 0, oobIdx)
+oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es))
+writeFakeSSZ(t, fs, root, es, oobIdx)
// This now fails at cache warmup time.
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}
@@ -6,14 +6,17 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
+"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestSlotByRoot_Summary(t *testing.T) {
-noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
-allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
-firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
-lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
-oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
+ee := params.BeaconConfig().ElectraForkEpoch
+es := util.SlotAtEpoch(t, ee)
+noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
@@ -53,7 +56,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
-sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
+sc.cache[key] = BlobStorageSummary{epoch: ee, mask: c.expected}
}
}
for _, c := range cases {
@@ -73,6 +76,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
}

func TestAllAvailable(t *testing.T) {
+es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
@@ -125,13 +129,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
-count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
+count: params.BeaconConfig().MaxBlobsPerBlock(es) + 1,
aa: false,
},
{
name: "max present",
-count: params.BeaconConfig().MaxBlobsPerBlock(0),
-idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
+count: params.BeaconConfig().MaxBlobsPerBlock(es),
+idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(es)),
aa: true,
},
{
@@ -143,7 +147,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
-mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
+mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
for _, idx := range c.idxSet {
mask[idx] = true
}
@@ -11,6 +11,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
+"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -60,12 +61,13 @@ func TestRootFromDir(t *testing.T) {
}

func TestSlotFromFile(t *testing.T) {
+es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
cases := []struct {
slot primitives.Slot
}{
-{slot: 0},
-{slot: 2},
-{slot: 1123581321},
+{slot: es + 0},
+{slot: es + 2},
+{slot: es + 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
@@ -243,39 +245,40 @@ func TestSlotFromBlob(t *testing.T) {
}

func TestIterationComplete(t *testing.T) {
+de := params.BeaconConfig().DenebForkEpoch
targets := []migrationTestTarget{
{
-ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
-path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
+ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
+path: "by-epoch/%d/%d/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
-ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
+ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
-path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
+path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
-ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
+ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
-path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
+path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
-ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
+ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 0),
slotOffset: 16,
-path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
+path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
{
-ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
+ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 1),
slotOffset: 16,
-path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
+path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
},
{
-ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
+ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", -1+math.MaxUint64/32, 0),
slotOffset: 16,
-path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
+path: "by-epoch/%d/%d/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
},
{
-ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
-path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
+ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
+path: "by-epoch/%d/%d/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
},
}
fs := afero.NewMemMapFs()
@@ -299,6 +302,7 @@ func TestIterationComplete(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, tar.ident.epoch, entry.epoch)
require.Equal(t, true, entry.HasIndex(tar.ident.index))
-require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
+path := fmt.Sprintf(tar.path, periodForEpoch(tar.ident.epoch), tar.ident.epoch)
+require.Equal(t, path, byEpoch.sszPath(tar.ident))
}
}
@@ -4,10 +4,10 @@ import (
"os"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/spf13/afero"
)

@@ -18,9 +18,7 @@ func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64)
}

func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
slot, err := slots.EpochStart(ident.epoch)
require.NoError(t, err)
slot += offset
slot := util.SlotAtEpoch(t, ident.epoch) + offset
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
scb, err := sc[0].MarshalSSZ()
require.NoError(t, err)
@@ -53,6 +51,7 @@ func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, af
}

func TestMigrations(t *testing.T) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
cases := []struct {
|
||||
name string
|
||||
forwardLayout string
|
||||
@@ -65,18 +64,18 @@ func TestMigrations(t *testing.T) {
|
||||
forwardLayout: LayoutNameByEpoch,
|
||||
targets: []migrationTestTarget{
|
||||
{
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
|
||||
slotOffset: 16,
|
||||
},
|
||||
},
|
||||
@@ -87,33 +86,33 @@ func TestMigrations(t *testing.T) {
|
||||
forwardLayout: LayoutNameByEpoch,
|
||||
targets: []migrationTestTarget{
|
||||
{
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
|
||||
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
|
||||
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
|
||||
slotOffset: 31,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
|
||||
slotOffset: 16,
|
||||
migrated: true,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
|
||||
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 1),
|
||||
slotOffset: 16,
|
||||
migrated: true,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
|
||||
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", de+16777217, 0),
|
||||
slotOffset: 16,
|
||||
migrated: true,
|
||||
},
|
||||
{
|
||||
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
|
||||
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
|
||||
migrated: true,
|
||||
},
|
||||
},
|
||||
|
||||
@@ -88,11 +88,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
|
||||
return &BlobMocker{fs: fs, bs: bs}, bs
|
||||
}
|
||||
|
||||
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
|
||||
func NewMockBlobStorageSummarizer(t *testing.T, epoch primitives.Epoch, set map[[32]byte][]int) BlobStorageSummarizer {
|
||||
c := newBlobStorageCache()
|
||||
for k, v := range set {
|
||||
for i := range v {
|
||||
if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
|
||||
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,6 +142,7 @@ func testRoots(n int) [][32]byte {
|
||||
}
|
||||
|
||||
func TestLayoutPruneBefore(t *testing.T) {
|
||||
electra := params.BeaconConfig().ElectraForkEpoch
|
||||
roots := testRoots(10)
|
||||
cases := []struct {
|
||||
name string
|
||||
@@ -153,27 +154,27 @@ func TestLayoutPruneBefore(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "none pruned",
|
||||
pruneBefore: 1,
|
||||
pruneBefore: electra + 1,
|
||||
pruned: []testIdent{},
|
||||
remain: []testIdent{
|
||||
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
|
||||
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
|
||||
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
|
||||
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 0}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "expected pruned before epoch",
|
||||
pruneBefore: 3,
|
||||
pruneBefore: electra + 3,
|
||||
pruned: []testIdent{
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 5}},
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: electra + 2, index: 0}},
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: electra + 2, index: 3}},
|
||||
},
|
||||
remain: []testIdent{
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}}, // boundary
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: electra + 3, index: 2}}, // boundary
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: electra + 3, index: 0}}, // boundary
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: electra + 4, index: 1}},
|
||||
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: electra + 4, index: 5}},
|
||||
},
|
||||
sum: pruneSummary{blobsPruned: 4},
|
||||
},
|
||||
|
||||
@@ -954,7 +954,9 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
deletedRoots := make([][32]byte, 0)

oRoot, err := s.OriginCheckpointBlockRoot(ctx)
if err != nil {
if err != nil && !errors.Is(err, ErrNotFoundOriginBlockRoot) {
// If the node did not use checkpoint sync, there will be no origin block root.
// Use zero hash which will never match any actual state root
return err
}

@@ -10,6 +10,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -1283,3 +1284,50 @@ func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b,

func BenchmarkState_CheckStateReadTime_1(b *testing.B) { checkStateReadTime(b, 1) }
func BenchmarkState_CheckStateReadTime_10(b *testing.B) { checkStateReadTime(b, 10) }

func TestStore_CleanUpDirtyStates_NoOriginRoot(t *testing.T) {
// This test verifies that CleanUpDirtyStates does not fail when the origin block root is not set,
// which can happen when starting from genesis or in certain fork scenarios like Fulu.
db := setupDB(t)
genesisState, err := util.NewBeaconState()
require.NoError(t, err)
genesisRoot := [fieldparams.RootLength]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
// Note: We intentionally do NOT call SaveOriginCheckpointBlockRoot here
// to simulate the scenario where origin block root is not set
slotsPerArchivedPoint := primitives.Slot(128)
bRoots := make([][fieldparams.RootLength]byte, 0)
prevRoot := genesisRoot
for i := primitives.Slot(1); i <= slotsPerArchivedPoint; i++ { // skip slot 0
b := util.NewBeaconBlock()
b.Block.Slot = i
b.Block.ParentRoot = prevRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(t.Context(), wsb))
bRoots = append(bRoots, r)
prevRoot = r
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(i))
require.NoError(t, db.SaveState(t.Context(), st, r))
}
require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), &ethpb.Checkpoint{
Root: bRoots[len(bRoots)-1][:],
Epoch: primitives.Epoch(slotsPerArchivedPoint / params.BeaconConfig().SlotsPerEpoch),
}))
// This should not fail even though origin block root is not set
err = db.CleanUpDirtyStates(t.Context(), slotsPerArchivedPoint)
require.NoError(t, err)
// Verify that cleanup still works correctly
for i, root := range bRoots {
if primitives.Slot(i) >= slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3)) {
require.Equal(t, true, db.HasState(t.Context(), root))
} else {
require.Equal(t, false, db.HasState(t.Context(), root))
}
}
}

@@ -254,6 +254,7 @@ func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaco
|
||||
return updatesByPeriod, nil
|
||||
}
|
||||
|
||||
// SetLastFinalityUpdate should be used only for testing.
|
||||
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
@@ -263,9 +264,11 @@ func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdat
|
||||
|
||||
func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
|
||||
if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
|
||||
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
|
||||
log.WithError(err).Error("Could not broadcast light client finality update")
|
||||
}
|
||||
go func() {
|
||||
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
|
||||
log.WithError(err).Error("Could not broadcast light client finality update")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
s.lastFinalityUpdate = update
|
||||
@@ -283,6 +286,7 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
|
||||
return s.lastFinalityUpdate
|
||||
}
|
||||
|
||||
// SetLastOptimisticUpdate should be used only for testing.
|
||||
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
@@ -292,9 +296,11 @@ func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticU
|
||||
|
||||
func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
|
||||
if broadcast {
|
||||
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
|
||||
log.WithError(err).Error("Could not broadcast light client optimistic update")
|
||||
}
|
||||
go func() {
|
||||
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
|
||||
log.WithError(err).Error("Could not broadcast light client optimistic update")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
s.lastOptimisticUpdate = update
|
||||
|
||||
@@ -3,6 +3,7 @@ package light_client
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
@@ -74,6 +75,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
|
||||
|
||||
timeForGoroutinesToFinish := 20 * time.Microsecond
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
@@ -85,6 +87,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -99,6 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -113,6 +117,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -127,6 +132,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
@@ -140,6 +146,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -154,6 +161,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
@@ -167,6 +175,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
package blstoexec

import (
"math"
"sync"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -87,7 +86,7 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
func (p *Pool) BLSToExecChangesForInclusion(st state.ReadOnlyBeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
p.lock.RLock()
defer p.lock.RUnlock()
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
length := int(min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
node := p.pending.Last()
for node != nil && len(result) < length {

@@ -1,7 +1,6 @@
package voluntaryexits

import (
"math"
"sync"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
@@ -63,7 +62,7 @@ func (p *Pool) PendingExits() ([]*ethpb.SignedVoluntaryExit, error) {
// return more than the block enforced MaxVoluntaryExits.
func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
p.lock.RLock()
length := int(math.Min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
length := int(min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
result := make([]*ethpb.SignedVoluntaryExit, 0, length)
node := p.pending.First()
for node != nil && len(result) < length {

@@ -139,6 +139,7 @@ go_test(
|
||||
"sender_test.go",
|
||||
"service_test.go",
|
||||
"subnets_test.go",
|
||||
"topics_test.go",
|
||||
"utils_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
@@ -162,6 +163,7 @@ go_test(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
|
||||
@@ -5,14 +5,18 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
@@ -274,6 +278,20 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
|
||||
return errors.New("attempted to broadcast nil light client optimistic update")
|
||||
}
|
||||
|
||||
// Add a delay to ensure the block has time to propagate.
|
||||
slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not compute slot start time")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
timeSinceSlotStart := time.Since(slotStart)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if timeSinceSlotStart < expectedDelay {
|
||||
waitDuration := expectedDelay - timeSinceSlotStart
|
||||
<-time.After(waitDuration)
|
||||
}
|
||||
|
||||
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
|
||||
@@ -294,6 +312,20 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return errors.New("attempted to broadcast nil light client finality update")
|
||||
}
|
||||
|
||||
// Add a delay to ensure the block has time to propagate.
|
||||
slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not compute slot start time")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
timeSinceSlotStart := time.Since(slotStart)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if timeSinceSlotStart < expectedDelay {
|
||||
waitDuration := expectedDelay - timeSinceSlotStart
|
||||
<-time.After(waitDuration)
|
||||
}
|
||||
|
||||
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client finality update")
|
||||
@@ -306,86 +338,150 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
|
||||
// broadcasted to the current fork and to the input column subnet.
|
||||
func (s *Service) BroadcastDataColumnSidecar(
|
||||
dataColumnSubnet uint64,
|
||||
dataColumnSidecar blocks.VerifiedRODataColumn,
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
// BroadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
|
||||
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
|
||||
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
|
||||
// cancelled (whichever comes first).
|
||||
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
|
||||
|
||||
// Retrieve the current fork digest.
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "current fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
return errors.Wrap(err, "current fork digest")
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastDataColumnSidecar(
|
||||
ctx context.Context,
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar blocks.VerifiedRODataColumn,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Inc()
|
||||
|
||||
// Define a one-slot length context timeout.
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
oneSlot := time.Duration(secondsPerSlot) * time.Second
|
||||
ctx, cancel := context.WithTimeout(ctx, oneSlot)
|
||||
defer cancel()
|
||||
|
||||
// Build the topic corresponding to this column subnet and this fork digest.
|
||||
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
|
||||
|
||||
// Compute the wrapped subnet index.
|
||||
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
|
||||
|
||||
// Find peers if needed.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, columnSubnet); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers for data column subnet")
|
||||
tracing.AnnotateError(span, err)
|
||||
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
|
||||
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
|
||||
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
|
||||
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
|
||||
type rootAndIndex struct {
|
||||
root [fieldparams.RootLength]byte
|
||||
index uint64
|
||||
}
|
||||
|
||||
// Broadcast the data column sidecar to the network.
|
||||
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast data column sidecar")
|
||||
tracing.AnnotateError(span, err)
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
timings sync.Map
|
||||
)
|
||||
|
||||
logLevel := logrus.GetLevel()
|
||||
|
||||
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
|
||||
for _, sidecar := range sidecars {
|
||||
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
|
||||
|
||||
wg.Go(func() {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.broadcastDataColumnSidecars")
|
||||
defer span.End()
|
||||
|
||||
// Compute the subnet for this data column sidecar.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
// Build the topic corresponding to this column subnet and this fork digest.
|
||||
topic := dataColumnSubnetToTopic(subnet, forkDigest)
|
||||
|
||||
// Compute the wrapped subnet index.
|
||||
wrappedSubIdx := subnet + dataColumnSubnetVal
|
||||
|
||||
// Find peers if needed.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot find peers if needed")
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast the data column sidecar to the network.
|
||||
if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot broadcast data column sidecar")
|
||||
return
|
||||
}
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
dataColumnSidecarBroadcasts.Inc()
|
||||
|
||||
// Record the timing for log purposes.
|
||||
if logLevel >= logrus.DebugLevel {
|
||||
root := sidecar.BlockRoot()
|
||||
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all broadcasts to finish.
|
||||
wg.Wait()
|
||||
|
||||
// The rest of this function is only for debug logging purposes.
|
||||
if logLevel < logrus.DebugLevel {
|
||||
return
|
||||
}
|
||||
|
||||
header := dataColumnSidecar.SignedBlockHeader.GetHeader()
|
||||
slot := header.GetSlot()
|
||||
|
||||
slotStartTime, err := slots.StartTime(s.genesisTime, slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to convert slot to time")
|
||||
type logInfo struct {
|
||||
durationMin time.Duration
|
||||
durationMax time.Duration
|
||||
indices []uint64
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"timeSinceSlotStart": time.Since(slotStartTime),
|
||||
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
|
||||
"columnSubnet": columnSubnet,
|
||||
"blobCount": len(dataColumnSidecar.Column),
|
||||
}).Debug("Broadcasted data column sidecar")
|
||||
logInfoPerRoot := make(map[[fieldparams.RootLength]byte]*logInfo, 1)
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
dataColumnSidecarBroadcasts.Inc()
|
||||
timings.Range(func(key any, value any) bool {
|
||||
rootAndIndex, ok := key.(rootAndIndex)
|
||||
if !ok {
|
||||
log.Error("Could not cast key to rootAndIndex")
|
||||
return true
|
||||
}
|
||||
|
||||
broadcastTime, ok := value.(time.Time)
|
||||
if !ok {
|
||||
log.Error("Could not cast value to time.Time")
|
||||
return true
|
||||
}
|
||||
|
||||
slot, ok := slotPerRoot[rootAndIndex.root]
|
||||
if !ok {
|
||||
log.WithField("root", fmt.Sprintf("%#x", rootAndIndex.root)).Error("Could not find slot for root")
|
||||
return true
|
||||
}
|
||||
|
||||
duration, err := slots.SinceSlotStart(slot, s.genesisTime, broadcastTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute duration since slot start")
|
||||
return true
|
||||
}
|
||||
|
||||
info, ok := logInfoPerRoot[rootAndIndex.root]
|
||||
if !ok {
|
||||
logInfoPerRoot[rootAndIndex.root] = &logInfo{durationMin: duration, durationMax: duration, indices: []uint64{rootAndIndex.index}}
|
||||
return true
|
||||
}
|
||||
|
||||
info.durationMin = min(info.durationMin, duration)
|
||||
info.durationMax = max(info.durationMax, duration)
|
||||
info.indices = append(info.indices, rootAndIndex.index)
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
for root, info := range logInfoPerRoot {
|
||||
slices.Sort(info.indices)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slotPerRoot[root],
|
||||
"count": len(info.indices),
|
||||
"indices": helpers.PrettySlice(info.indices),
|
||||
"timeSinceSlotStartMin": info.durationMin,
|
||||
"timeSinceSlotStartMax": info.durationMax,
|
||||
}).Debug("Broadcasted data column sidecars")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) findPeersIfNeeded(
|
||||
|
||||
@@ -15,11 +15,12 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -60,7 +61,6 @@ func TestService_Broadcast(t *testing.T) {
|
||||
topic := "/eth2/%x/testing"
|
||||
// Set a test gossip mapping for testpb.TestSimpleMessage.
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = topic
|
||||
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest)
|
||||
@@ -530,6 +530,11 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.SyncMessageDueBPS = 60 // ~72 milliseconds
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
@@ -540,7 +545,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisTime: time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
@@ -567,12 +572,19 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
|
||||
require.NoError(t, err)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if time.Now().Before(slotStartTime.Add(expectedDelay)) {
|
||||
tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
|
||||
}
|
||||
|
||||
result := &ethpb.LightClientOptimisticUpdateAltair{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
if !proto.Equal(result, msg.Proto()) {
|
||||
@@ -594,6 +606,11 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.SyncMessageDueBPS = 60 // ~72 milliseconds
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
@@ -604,7 +621,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisTime: time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
@@ -631,12 +648,19 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
|
||||
require.NoError(t, err)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if time.Now().Before(slotStartTime.Add(expectedDelay)) {
|
||||
tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
|
||||
}
|
||||
|
||||
result := &ethpb.LightClientFinalityUpdateAltair{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
if !proto.Equal(result, msg.Proto()) {
|
||||
@@ -664,6 +688,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
topicFormat = DataColumnSubnetTopicFormat
|
||||
)
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
// Load the KZG trust setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
@@ -686,7 +712,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
_, pkey, ipAddr := createHost(t, port)
|
||||
|
||||
service := &Service{
|
||||
ctx: t.Context(),
|
||||
ctx: ctx,
|
||||
host: p1.BHost,
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
@@ -695,7 +721,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
@@ -722,7 +748,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
|
||||
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -10,32 +12,28 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var errNoCustodyInfo = errors.New("no custody info available")
|
||||
|
||||
var _ CustodyManager = (*Service)(nil)
|
||||
|
||||
// EarliestAvailableSlot returns the earliest available slot.
|
||||
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
s.custodyInfoLock.RLock()
|
||||
defer s.custodyInfoLock.RUnlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errors.New("no custody info available")
|
||||
// It blocks until the custody info is set or the context is done.
|
||||
func (s *Service) EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error) {
|
||||
custodyInfo, err := s.waitForCustodyInfo(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "wait for custody info")
|
||||
}
|
||||
|
||||
return s.custodyInfo.earliestAvailableSlot, nil
|
||||
return custodyInfo.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount returns the custody group count.
|
||||
func (s *Service) CustodyGroupCount() (uint64, error) {
|
||||
s.custodyInfoLock.Lock()
|
||||
defer s.custodyInfoLock.Unlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errNoCustodyInfo
|
||||
// It blocks until the custody info is set or the context is done.
|
||||
func (s *Service) CustodyGroupCount(ctx context.Context) (uint64, error) {
|
||||
custodyInfo, err := s.waitForCustodyInfo(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "wait for custody info")
|
||||
}
|
||||
|
||||
return s.custodyInfo.groupCount, nil
|
||||
return custodyInfo.groupCount, nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfo updates the stored custody group count to the incoming one
|
||||
@@ -79,6 +77,9 @@ func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
|
||||
earliestAvailableSlot: earliestAvailableSlot,
|
||||
groupCount: custodyGroupCount,
|
||||
}
|
||||
|
||||
close(s.custodyInfoSet)
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
@@ -147,6 +148,33 @@ func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
|
||||
return custodyCount
|
||||
}
|
||||
|
||||
func (s *Service) waitForCustodyInfo(ctx context.Context) (custodyInfo, error) {
|
||||
select {
|
||||
case <-s.custodyInfoSet:
|
||||
info, ok := s.copyCustodyInfo()
|
||||
if !ok {
|
||||
return custodyInfo{}, errors.New("custody info was set but is nil")
|
||||
}
|
||||
|
||||
return info, nil
|
||||
case <-ctx.Done():
|
||||
return custodyInfo{}, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// copyCustodyInfo returns a copy of the current custody info in a thread-safe manner.
|
||||
// If no custody info is set, it returns false as the second return value.
|
||||
func (s *Service) copyCustodyInfo() (custodyInfo, bool) {
|
||||
s.custodyInfoLock.RLock()
|
||||
defer s.custodyInfoLock.RUnlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return custodyInfo{}, false
|
||||
}
|
||||
|
||||
return *s.custodyInfo, true
|
||||
}
|
||||
|
||||
// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
|
||||
// If the ENR is not available, it defaults to the minimum number of custody groups
|
||||
// an honest node custodies and serves samples from.
|
||||
|
||||
@@ -20,58 +20,37 @@ import (
|
||||
)
|
||||
|
||||
func TestEarliestAvailableSlot(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
_, err := service.EarliestAvailableSlot()
|
||||
service := &Service{
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
close(service.custodyInfoSet)
|
||||
slot, err := service.EarliestAvailableSlot(t.Context())
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
slot, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
}
|
||||
|
||||
func TestCustodyGroupCount(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
const expected uint64 = 5
|
||||
|
||||
_, err := service.CustodyGroupCount()
|
||||
service := &Service{
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
|
||||
})
|
||||
close(service.custodyInfoSet)
|
||||
count, err := service.CustodyGroupCount(t.Context())
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected uint64 = 5
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
count, err := service.CustodyGroupCount()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
@@ -163,7 +142,8 @@ func TestUpdateCustodyInfo(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
}
|
||||
|
||||
slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)
|
||||
|
||||
@@ -253,7 +253,7 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
return
|
||||
}
|
||||
|
||||
custodyGroupCount, err = s.CustodyGroupCount()
|
||||
custodyGroupCount, err = s.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve custody group count")
|
||||
return
|
||||
@@ -604,27 +604,13 @@ func (s *Service) createLocalNode(
|
||||
localNode = initializeSyncCommSubnets(localNode)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
// TODO: Replace this quick fix with a proper synchronization scheme (chan?)
|
||||
const delay = 1 * time.Second
|
||||
|
||||
var custodyGroupCount uint64
|
||||
|
||||
err := errNoCustodyInfo
|
||||
for errors.Is(err, errNoCustodyInfo) {
|
||||
custodyGroupCount, err = s.CustodyGroupCount()
|
||||
if errors.Is(err, errNoCustodyInfo) {
|
||||
log.WithField("delay", delay).Debug("No custody info available yet, retrying later")
|
||||
time.Sleep(delay)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
custodyGroupCount, err := s.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
|
||||
if s.cfg != nil && s.cfg.HostAddress != "" {
|
||||
|
||||
@@ -281,9 +281,13 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: tt.cfg,
|
||||
ctx: t.Context(),
|
||||
custodyInfo: &custodyInfo{groupCount: custodyRequirement},
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
}
|
||||
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
|
||||
if tt.expectedError {
|
||||
require.NotNil(t, err)
|
||||
@@ -912,9 +916,13 @@ func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
ctx: t.Context(),
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: &custodyInfo{groupCount: custodyGroupCount},
|
||||
}
|
||||
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
// Set the listener and the metadata.
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return service.createListener(nil, privateKey)
|
||||
|
||||
@@ -79,6 +79,9 @@ func compareForkENR(self, peer *enr.Record) error {
|
||||
// we allow the connection to continue until the fork boundary.
|
||||
return nil
|
||||
}
|
||||
if selfEntry.NextForkEpoch == params.BeaconConfig().FarFutureEpoch {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Since we agree on the next fork epoch, we require next fork version to also be in agreement.
|
||||
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {
|
||||
|
||||
@@ -122,6 +122,29 @@ func TestCompareForkENR(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIgnoreFarFutureMismatch(t *testing.T) {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
_, k := createAddrAndPrivKey(t)
|
||||
current := params.GetNetworkScheduleEntry(params.BeaconConfig().ElectraForkEpoch)
|
||||
next := params.NetworkScheduleEntry{
|
||||
Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ForkDigest: [4]byte{0xFF, 0xFF, 0xFF, 0xFF}, // Ensure a unique digest for testing.
|
||||
ForkVersion: [4]byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
}
|
||||
self := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(self, current, next))
|
||||
|
||||
peerNext := params.NetworkScheduleEntry{
|
||||
Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ForkDigest: [4]byte{0xAA, 0xAA, 0xAA, 0xAA}, // Different unique digest for testing.
|
||||
ForkVersion: [4]byte{0xAA, 0xAA, 0xAA, 0xAA},
|
||||
}
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(peer, current, peerNext))
|
||||
require.NoError(t, compareForkENR(self.Node().Record(), peer.Node().Record()))
|
||||
}
|
||||
|
||||
func TestNfdSetAndLoad(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
|
||||
|
||||
@@ -52,7 +52,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumnSidecar(columnSubnet uint64, dataColumnSidecar blocks.VerifiedRODataColumn) error
|
||||
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
@@ -123,8 +123,8 @@ type (
|
||||
|
||||
// CustodyManager abstracts some data columns related methods.
|
||||
CustodyManager interface {
|
||||
EarliestAvailableSlot() (primitives.Slot, error)
|
||||
CustodyGroupCount() (uint64, error)
|
||||
EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error)
|
||||
CustodyGroupCount(ctx context.Context) (uint64, error)
|
||||
UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
CustodyGroupCountFromPeer(peer.ID) uint64
|
||||
}
|
||||
|
||||
@@ -198,9 +198,11 @@ func (s *Service) updateMetrics() {
|
||||
overallScore := s.peers.Scorers().Score(pid)
|
||||
peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
|
||||
}
|
||||
connectedPeersCount.Reset() // Clear out previous results.
|
||||
for agent, total := range numConnectedPeersByClient {
|
||||
connectedPeersCount.WithLabelValues(agent).Set(total)
|
||||
}
|
||||
avgScoreConnectedClients.Reset() // Clear out previous results.
|
||||
for agent, scoringData := range peerScoresByClient {
|
||||
avgScore := average(scoringData)
|
||||
avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)
|
||||
|
||||
@@ -24,7 +24,6 @@ package peers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -411,7 +410,7 @@ func (p *Status) RandomizeBackOff(pid peer.ID) {
|
||||
return
|
||||
}
|
||||
|
||||
duration := time.Duration(math.Max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
|
||||
duration := time.Duration(max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
|
||||
peerData.NextValidTime = time.Now().Add(duration)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
mathutil "github.com/OffchainLabs/prysm/v6/math"
|
||||
pbrpc "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
@@ -135,13 +134,15 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
|
||||
|
||||
// pubsubOptions creates a list of options to configure our router with.
|
||||
func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
filt := pubsub.NewAllowlistSubscriptionFilter(s.allTopicStrings()...)
|
||||
filt = pubsub.WrapLimitSubscriptionFilter(filt, pubsubSubscriptionRequestLimit)
|
||||
psOpts := []pubsub.Option{
|
||||
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
|
||||
pubsub.WithNoAuthor(),
|
||||
pubsub.WithMessageIdFn(func(pmsg *pubsubpb.Message) string {
|
||||
return MsgID(s.genesisValidatorsRoot, pmsg)
|
||||
}),
|
||||
pubsub.WithSubscriptionFilter(s),
|
||||
pubsub.WithSubscriptionFilter(filt),
|
||||
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
|
||||
pubsub.WithMaxMessageSize(int(MaxMessageSize())), // lint:ignore uintcast -- Max Message Size is a config value and is naturally bounded by networking limitations.
|
||||
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),
|
||||
@@ -246,5 +247,5 @@ func ExtractGossipDigest(topic string) ([4]byte, error) {
|
||||
// # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small.
|
||||
// return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024)
|
||||
func MaxMessageSize() uint64 {
|
||||
return mathutil.Max(encoder.MaxCompressedLen(params.BeaconConfig().MaxPayloadSize)+1024, 1024*1024)
|
||||
return max(encoder.MaxCompressedLen(params.BeaconConfig().MaxPayloadSize)+1024, 1024*1024)
|
||||
}
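Both RandomizeBackOff and MaxMessageSize above swap helper functions (math.Max, mathutil.Max) for Go's built-in generic max, available since Go 1.21. A standalone sketch (not part of the diff) of the builtin's behavior:

package main

import "fmt"

func main() {
	fmt.Println(max(1024*1024, 512))        // ints: 1048576
	fmt.Println(max(3.5, 2.0, 7.25))        // float64s, variadic: 7.25
	fmt.Println(min(uint64(10), uint64(3))) // works for any ordered type: 3
}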
@@ -130,7 +130,7 @@ func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"topic": topic,
|
||||
}).Debug("Peer subscription rejected")
|
||||
}).Trace("Peer subscription rejected")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -92,7 +91,7 @@ type Service struct {
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfo *custodyInfo
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
clock *startup.Clock
|
||||
custodyInfoSet chan struct{}
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
}
|
||||
|
||||
@@ -139,6 +138,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
peerDisconnectionTime: cache.New(1*time.Second, 1*time.Minute),
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
}
|
||||
|
||||
ipAddr := prysmnetwork.IPAddr()
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -223,8 +224,14 @@ func (s *Service) findPeersWithSubnets(
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter node")
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"nodeID": node.ID(),
|
||||
"topicFormat": topicFormat,
|
||||
}).Debug("Could not get needed subnets from peer")
continue
|
||||
}
|
||||
|
||||
if len(nodeSubnets) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -169,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
|
||||
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -199,12 +199,12 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
|
||||
}
|
||||
|
||||
// EarliestAvailableSlot -- fake.
|
||||
func (*FakeP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
func (*FakeP2P) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount -- fake.
|
||||
func (*FakeP2P) CustodyGroupCount() (uint64, error) {
|
||||
func (*FakeP2P) CustodyGroupCount(context.Context) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -233,7 +233,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
|
||||
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
@@ -473,7 +473,7 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
|
||||
}
|
||||
|
||||
// EarliestAvailableSlot .
|
||||
func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
func (s *TestP2P) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
|
||||
s.custodyInfoMut.RLock()
|
||||
defer s.custodyInfoMut.RUnlock()
|
||||
|
||||
@@ -481,7 +481,7 @@ func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
}
|
||||
|
||||
// CustodyGroupCount .
|
||||
func (s *TestP2P) CustodyGroupCount() (uint64, error) {
|
||||
func (s *TestP2P) CustodyGroupCount(context.Context) (uint64, error) {
|
||||
s.custodyInfoMut.RLock()
|
||||
defer s.custodyInfoMut.RUnlock()
|
||||
|
||||
|
||||
@@ -1,5 +1,15 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"slices"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
)
|
||||
|
||||
const (
|
||||
// GossipProtocolAndDigest represents the protocol and fork digest prefix in a gossip topic.
|
||||
GossipProtocolAndDigest = "/eth2/%x/"
|
||||
@@ -66,3 +76,129 @@ const (
|
||||
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
|
||||
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
|
||||
)
|
||||
|
||||
// topic is a struct representing a single gossipsub topic.
|
||||
// It can also be used to represent a set of subnet topics: see appendSubnetsBelow().
|
||||
// topic is intended to be used as an immutable value - it is hashable so it can be used as a map key,
// and it uses strings in order to leverage Go's string interning for memory efficiency.
type topic struct {
|
||||
full string
|
||||
digest string
|
||||
message string
|
||||
start primitives.Epoch
|
||||
end primitives.Epoch
|
||||
suffix string
|
||||
subnet uint64
|
||||
}
|
||||
|
||||
func (t topic) String() string {
|
||||
return t.full
|
||||
}
|
||||
|
||||
// sszEnc is used to get the protocol suffix for topics. This value has been effectively hardcoded
|
||||
// since phase0.
|
||||
var sszEnc = &encoder.SszNetworkEncoder{}
|
||||
|
||||
// newTopic constructs a topic value for an ordinary topic structure (without subnets).
|
||||
func newTopic(start, end primitives.Epoch, digest [4]byte, message string) topic {
|
||||
suffix := sszEnc.ProtocolSuffix()
|
||||
t := topic{digest: hex.EncodeToString(digest[:]), message: message, start: start, end: end, suffix: suffix}
|
||||
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + t.suffix
|
||||
return t
|
||||
}
|
||||
|
||||
// newSubnetTopic constructs a topic value for a topic with a subnet structure.
|
||||
func newSubnetTopic(start, end primitives.Epoch, digest [4]byte, message string, subnet uint64) topic {
|
||||
t := newTopic(start, end, digest, message)
|
||||
t.subnet = subnet
|
||||
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + "_" + strconv.Itoa(int(t.subnet)) + t.suffix
|
||||
return t
|
||||
}
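Since topic is a small comparable struct, values can be compared and used as map keys directly. A short illustrative sketch (the digest bytes and the helper name are examples only) of what the constructors above produce:

// exampleTopics is a hypothetical helper for illustration only.
func exampleTopics() {
	digest := [4]byte{0xcc, 0x2c, 0x5c, 0xdb}
	blockTopic := newTopic(0, 100, digest, GossipBlockMessage)
	colTopic := newSubnetTopic(0, 100, digest, GossipDataColumnSidecarMessage, 7)
	// blockTopic.String() == "/eth2/cc2c5cdb/beacon_block/ssz_snappy"
	// colTopic.String()   == "/eth2/cc2c5cdb/data_column_sidecar_7/ssz_snappy"
	seen := map[topic]struct{}{
		blockTopic: {},
		colTopic:   {},
	}
	_ = seen
}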
// allTopicStrings returns the full topic string for all topics
|
||||
// that could be derived from the current fork schedule.
|
||||
func (s *Service) allTopicStrings() []string {
|
||||
topics := s.allTopics()
|
||||
topicStrs := make([]string, 0, len(topics))
|
||||
for _, t := range topics {
|
||||
topicStrs = append(topicStrs, t.String())
|
||||
}
|
||||
return topicStrs
|
||||
}
|
||||
|
||||
// appendSubnetsBelow uses the value of top.subnet as the subnet count
|
||||
// and creates a topic value for each subnet less than the subnet count, appending them all
|
||||
// to appendTo.
|
||||
func appendSubnetsBelow(top topic, digest [4]byte, appendTo []topic) []topic {
|
||||
for i := range top.subnet {
|
||||
appendTo = append(appendTo, newSubnetTopic(top.start, top.end, digest, top.message, i))
|
||||
}
|
||||
return appendTo
|
||||
}
|
||||
|
||||
// allTopics returns all topics that could be derived from the current fork schedule.
|
||||
func (s *Service) allTopics() []topic {
|
||||
cfg := params.BeaconConfig()
|
||||
// bellatrix: no special topics; electra: blob topics are handled all together below
|
||||
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
|
||||
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
|
||||
// Templates are starter topics - they have a placeholder digest, and for subnet topics the subnet field is set to the
// maximum subnet count (see how this is used in appendSubnetsBelow). Templates are not directly returned by the method;
// they are copied and modified for each digest where they apply, based on their start and end epochs.
|
||||
empty := [4]byte{0, 0, 0, 0} // empty digest for templates, replaced by real digests in per-fork copies.
|
||||
templates := []topic{
|
||||
newTopic(genesis, future, empty, GossipBlockMessage),
|
||||
newTopic(genesis, future, empty, GossipAggregateAndProofMessage),
|
||||
newTopic(genesis, future, empty, GossipExitMessage),
|
||||
newTopic(genesis, future, empty, GossipProposerSlashingMessage),
|
||||
newTopic(genesis, future, empty, GossipAttesterSlashingMessage),
|
||||
newSubnetTopic(genesis, future, empty, GossipAttestationMessage, cfg.AttestationSubnetCount),
|
||||
newSubnetTopic(altair, future, empty, GossipSyncCommitteeMessage, cfg.SyncCommitteeSubnetCount),
|
||||
newTopic(altair, future, empty, GossipContributionAndProofMessage),
|
||||
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
|
||||
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
|
||||
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
|
||||
}
|
||||
last := params.GetNetworkScheduleEntry(genesis)
|
||||
schedule := []params.NetworkScheduleEntry{last}
|
||||
for next := params.NextNetworkScheduleEntry(last.Epoch); next.ForkDigest != last.ForkDigest; next = params.NextNetworkScheduleEntry(next.Epoch) {
|
||||
schedule = append(schedule, next)
|
||||
last = next
|
||||
}
|
||||
slices.Reverse(schedule) // reverse the fork schedule because it simplifies dealing with BPOs
|
||||
fullTopics := make([]topic, 0, len(templates))
|
||||
for _, top := range templates {
|
||||
for _, entry := range schedule {
|
||||
if top.start <= entry.Epoch && entry.Epoch < top.end {
|
||||
if top.subnet > 0 { // subnet topics in the list above should set this value to the max subnet count: see appendSubnetsBelow
|
||||
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
|
||||
} else {
|
||||
fullTopics = append(fullTopics, newTopic(top.start, top.end, entry.ForkDigest, top.message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
end := future
|
||||
// We're iterating from high to low per the slices.Reverse above.
// So we'll update end = entry.Epoch as we go down, and use that as the end for the next entry.
|
||||
// This loop either adds blob or data column sidecar topics depending on the fork.
|
||||
for _, entry := range schedule {
|
||||
if entry.Epoch < deneb {
// note: there is a special case where deneb is the genesis fork, in which case
// we'll generate blob sidecar topics for the earlier schedule, but
// this only happens in devnets where it doesn't really matter.
break
}
|
||||
message := GossipDataColumnSidecarMessage
|
||||
subnets := cfg.DataColumnSidecarSubnetCount
|
||||
if entry.Epoch < fulu {
|
||||
message = GossipBlobSidecarMessage
|
||||
subnets = uint64(cfg.MaxBlobsPerBlockAtEpoch(entry.Epoch))
|
||||
}
|
||||
// Set subnet to max value, appendSubnetsBelow will iterate every index up to that value.
|
||||
top := newSubnetTopic(entry.Epoch, end, entry.ForkDigest, message, subnets)
|
||||
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
|
||||
end = entry.Epoch // These topics / subnet structures are mutually exclusive, so set each end to the next highest entry.
|
||||
}
|
||||
return fullTopics
|
||||
}
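The strings produced here are exactly what the allowlist subscription filter added in pubsubOptions consumes. A condensed sketch of that wiring (mirroring the earlier hunk; pubsubSubscriptionRequestLimit is the existing constant):

// Condensed sketch: only topics derivable from the fork schedule are accepted,
// and the total number of subscriptions a peer may request is capped.
filt := pubsub.NewAllowlistSubscriptionFilter(s.allTopicStrings()...)
filt = pubsub.WrapLimitSubscriptionFilter(filt, pubsubSubscriptionRequestLimit)
psOpts := []pubsub.Option{pubsub.WithSubscriptionFilter(filt)}
_ = psOpts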
beacon-chain/p2p/topics_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestAllTopics(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
s := &Service{}
|
||||
all := s.allTopicStrings()
|
||||
tops := map[string]struct{}{}
|
||||
for _, t := range all {
|
||||
tops[t] = struct{}{}
|
||||
}
|
||||
require.Equal(t, len(tops), len(all), "duplicate topics found")
|
||||
expected := []string{
|
||||
"/eth2/ad532ceb/sync_committee_contribution_and_proof/ssz_snappy",
|
||||
"/eth2/ad532ceb/beacon_aggregate_and_proof/ssz_snappy",
|
||||
"/eth2/ad532ceb/beacon_block/ssz_snappy",
|
||||
"/eth2/ad532ceb/bls_to_execution_change/ssz_snappy",
|
||||
"/eth2/afcaaba0/beacon_attestation_19/ssz_snappy",
|
||||
"/eth2/cc2c5cdb/data_column_sidecar_0/ssz_snappy",
|
||||
"/eth2/cc2c5cdb/data_column_sidecar_127/ssz_snappy",
|
||||
}
|
||||
forks := []primitives.Epoch{cfg.GenesisEpoch, cfg.AltairForkEpoch,
|
||||
cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch,
|
||||
cfg.ElectraForkEpoch, cfg.FuluForkEpoch}
|
||||
// sanity check: we should always have a block topic.
|
||||
// construct it by hand in case there are bugs in newTopic.
|
||||
for _, f := range forks {
|
||||
digest := params.ForkDigest(f)
|
||||
expected = append(expected, "/eth2/"+hex.EncodeToString(digest[:])+"/beacon_block/ssz_snappy")
|
||||
}
|
||||
for _, e := range expected {
|
||||
_, ok := tops[e]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
// we should have no data column subnets before fulu
|
||||
electraColumn := newSubnetTopic(cfg.ElectraForkEpoch, cfg.FuluForkEpoch,
|
||||
params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
|
||||
GossipDataColumnSidecarMessage,
|
||||
cfg.DataColumnSidecarSubnetCount-1)
|
||||
// we should have no blob sidecars before deneb or after electra
|
||||
blobBeforeDeneb := newSubnetTopic(cfg.DenebForkEpoch-1, cfg.DenebForkEpoch,
|
||||
params.ForkDigest(cfg.DenebForkEpoch-1),
|
||||
GossipBlobSidecarMessage,
|
||||
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch-1))-1)
|
||||
blobAfterElectra := newSubnetTopic(cfg.FuluForkEpoch, cfg.FarFutureEpoch,
|
||||
params.ForkDigest(cfg.FuluForkEpoch),
|
||||
GossipBlobSidecarMessage,
|
||||
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch))-1)
|
||||
unexpected := []string{
|
||||
"/eth2/cc2c5cdb/data_column_sidecar_128/ssz_snappy",
|
||||
electraColumn.String(),
|
||||
blobBeforeDeneb.String(),
|
||||
blobAfterElectra.String(),
|
||||
}
|
||||
for _, e := range unexpected {
|
||||
_, ok := tops[e]
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
}
|
||||
@@ -4876,8 +4876,16 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_validateBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
|
||||
fe := params.BeaconConfig().FuluForkEpoch
|
||||
fs := util.SlotAtEpoch(t, fe)
|
||||
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
denebMax := params.BeaconConfig().MaxBlobsPerBlock(ds)
|
||||
blob := util.GetRandBlob(123)
|
||||
// Generate proper commitment and proof for the blob
|
||||
var kzgBlob kzg.Blob
|
||||
@@ -4887,6 +4895,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
|
||||
require.NoError(t, err)
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -4902,10 +4911,11 @@ func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
electraMax := params.BeaconConfig().MaxBlobsPerBlock(es)
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := 0; i < electraMax+1; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
@@ -4923,6 +4933,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -4931,107 +4942,54 @@ func Test_validateBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobs(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Slot = es
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
blk.Block.Slot = es
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
// Create Fulu block with proper cell proofs
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
blk.Block.Slot = fs
|
||||
|
||||
// Generate valid commitments and cell proofs for testing
|
||||
blobCount := 2
|
||||
@@ -5075,18 +5033,8 @@ func Test_validateBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
blk.Block.Slot = fs
|
||||
|
||||
// Create valid commitments but wrong number of cell proofs
|
||||
blobCount := 2
|
||||
@@ -5123,6 +5071,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -5134,6 +5083,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -5148,53 +5098,48 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
// Set up config with BlobSchedule (BPO - Blob Production Optimization)
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.FuluForkEpoch = 200
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
// Define blob schedule with progressive increases
|
||||
testCfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
{Epoch: fe + 1, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: fe + 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: fe + 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: fe + 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
}
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
s := &Server{}
|
||||
|
||||
// Test epoch 0-9: max 3 blobs
|
||||
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
|
||||
t.Run("deneb under and over max", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 5 // Epoch 0
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:3]
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:denebMax]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
|
||||
|
||||
// Should fail with 4 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:4]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:4], proofs[:4])
|
||||
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
|
||||
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max", err)
|
||||
})
|
||||
|
||||
// Test epoch 30+: max 9 blobs
|
||||
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 960 // Epoch 30
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
t.Run("different max in electra", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = es
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
|
||||
|
||||
// Should fail with 10 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
// exceed the electra max
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -36,11 +36,13 @@ import (
|
||||
func TestBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 1
|
||||
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
|
||||
ds := util.SlotAtEpoch(t, cfg.DenebForkEpoch)
|
||||
|
||||
db := testDB.SetupDB(t)
|
||||
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
|
||||
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es, 4)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
bs := filesystem.NewEphemeralBlobStorage(t)
|
||||
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
|
||||
@@ -170,7 +172,7 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -194,7 +196,7 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot not found", func(t *testing.T) {
|
||||
u := "http://foo.example/122"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es-1)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -211,7 +213,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
})
|
||||
t.Run("one blob only", func(t *testing.T) {
|
||||
u := "http://foo.example/123?indices=2"
|
||||
u := fmt.Sprintf("http://foo.example/%d?indices=2", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -242,7 +244,7 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("no blobs returns an empty array", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -266,8 +268,8 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("blob index over max", func(t *testing.T) {
|
||||
overLimit := maxBlobsPerBlockByVersion(version.Deneb)
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlock(ds)
|
||||
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -281,7 +283,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
|
||||
})
|
||||
t.Run("outside retention period returns 200 with what we have", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -305,13 +307,13 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es+128, 0)
|
||||
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(commitments), 0)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
|
||||
u := "http://foo.example/333"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es+128)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -423,16 +425,17 @@ func TestBlobs(t *testing.T) {
|
||||
func TestBlobs_Electra(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 0
|
||||
cfg.ElectraForkEpoch = 1
|
||||
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 6},
|
||||
{Epoch: 1, MaxBlobsPerBlock: 9},
|
||||
{Epoch: cfg.FuluForkEpoch + 4096, MaxBlobsPerBlock: 6},
|
||||
{Epoch: cfg.FuluForkEpoch + 4096 + 128, MaxBlobsPerBlock: 9},
|
||||
}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
|
||||
db := testDB.SetupDB(t)
|
||||
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, maxBlobsPerBlockByVersion(version.Electra))
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
|
||||
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, es, overLimit)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
|
||||
bs := filesystem.NewEphemeralBlobStorage(t)
|
||||
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
|
||||
@@ -450,7 +453,7 @@ func TestBlobs_Electra(t *testing.T) {
|
||||
TimeFetcher: mockChainService,
|
||||
}
|
||||
t.Run("max blobs for electra", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -468,7 +471,7 @@ func TestBlobs_Electra(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
|
||||
require.Equal(t, overLimit, len(resp.Data))
|
||||
sidecar := resp.Data[0]
|
||||
require.NotNil(t, sidecar)
|
||||
assert.Equal(t, "0", sidecar.Index)
|
||||
@@ -481,8 +484,8 @@ func TestBlobs_Electra(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("requested blob index at max", func(t *testing.T) {
|
||||
limit := maxBlobsPerBlockByVersion(version.Electra) - 1
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", limit)
|
||||
limit := params.BeaconConfig().MaxBlobsPerBlock(es) - 1
|
||||
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, limit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -513,8 +516,8 @@ func TestBlobs_Electra(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("blob index over max", func(t *testing.T) {
|
||||
overLimit := maxBlobsPerBlockByVersion(version.Electra)
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
|
||||
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -530,6 +533,7 @@ func TestBlobs_Electra(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_parseIndices(t *testing.T) {
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
tests := []struct {
|
||||
name string
|
||||
query string
|
||||
@@ -559,7 +563,7 @@ func Test_parseIndices(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := parseIndices(&url.URL{RawQuery: tt.query}, 0)
|
||||
got, err := parseIndices(&url.URL{RawQuery: tt.query}, ds)
|
||||
if err != nil && tt.wantErr != "" {
|
||||
require.StringContains(t, tt.wantErr, err.Error())
|
||||
return
|
||||
@@ -588,6 +592,7 @@ func TestGetBlobs(t *testing.T) {
|
||||
{Epoch: 20, MaxBlobsPerBlock: 12}, // Fulu
|
||||
}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
|
||||
|
||||
db := testDB.SetupDB(t)
|
||||
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
|
||||
@@ -1009,7 +1014,8 @@ func TestGetBlobs(t *testing.T) {
|
||||
|
||||
// Test for Electra fork
|
||||
t.Run("electra max blobs", func(t *testing.T) {
|
||||
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, maxBlobsPerBlockByVersion(version.Electra))
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
|
||||
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, overLimit)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
|
||||
electraBs := filesystem.NewEphemeralBlobStorage(t)
|
||||
electraSidecars := verification.FakeVerifySliceForTest(t, electraBlobs)
|
||||
@@ -1036,7 +1042,8 @@ func TestGetBlobs(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
|
||||
|
||||
require.Equal(t, overLimit, len(resp.Data))
|
||||
blob := resp.Data[0]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(electraBlobs[0].Blob), blob)
|
||||
@@ -1145,14 +1152,3 @@ func unmarshalBlobs(t *testing.T, response []byte) [][]byte {
|
||||
}
|
||||
return blobs
|
||||
}
|
||||
|
||||
func maxBlobsPerBlockByVersion(v int) int {
|
||||
if v >= version.Fulu {
|
||||
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu
|
||||
}
|
||||
if v >= version.Electra {
|
||||
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra
|
||||
}
|
||||
|
||||
return params.BeaconConfig().DeprecatedMaxBlobsPerBlock
|
||||
}
|
||||
|
||||
@@ -124,11 +124,21 @@ func convertValueForJSON(v reflect.Value, tag string) interface{} {
|
||||
if !v.Field(i).CanInterface() {
|
||||
continue // unexported
|
||||
}
|
||||
key := f.Tag.Get("json")
|
||||
if key == "" || key == "-" {
|
||||
jsonTag := f.Tag.Get("json")
|
||||
if jsonTag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse JSON tag options (e.g., "fieldname,omitempty")
|
||||
parts := strings.Split(jsonTag, ",")
|
||||
key := parts[0]
|
||||
|
||||
if key == "" {
|
||||
key = f.Name
|
||||
}
|
||||
m[key] = convertValueForJSON(v.Field(i), tag)
|
||||
|
||||
fieldValue := convertValueForJSON(v.Field(i), tag)
|
||||
m[key] = fieldValue
|
||||
}
|
||||
return m
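The tag handling above now treats json:"-" as a skip, strips options such as ,omitempty, and falls back to the Go field name when no tag is present. A small hedged illustration (the sample struct is hypothetical) of which keys would be emitted:

// sampleSpecEntry is a hypothetical struct used only to illustrate the tag cases handled above.
type sampleSpecEntry struct {
	Epoch      uint64  `json:"EPOCH"`               // emitted as "EPOCH"
	MaxBlobs   uint64  `json:"MAX_BLOBS,omitempty"` // emitted as "MAX_BLOBS" (option stripped)
	ForkDigest [4]byte `json:"-"`                   // skipped entirely
	Note       string  // no tag: key falls back to the field name "Note"
}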
@@ -157,8 +167,8 @@ func prepareConfigSpec() (map[string]interface{}, error) {
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
tField := t.Field(i)
|
||||
_, isSpec := tField.Tag.Lookup("spec")
|
||||
if !isSpec {
|
||||
specTag, isSpec := tField.Tag.Lookup("spec")
|
||||
if !isSpec || specTag != "true" {
|
||||
continue
|
||||
}
|
||||
if shouldSkip(tField) {
|
||||
|
||||
@@ -145,7 +145,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.PendingDepositsLimit = 82
|
||||
config.MaxPendingPartialsPerWithdrawalsSweep = 83
|
||||
config.PendingConsolidationsLimit = 84
|
||||
config.MaxPartialWithdrawalsPerPayload = 85
|
||||
config.FullExitRequestAmount = 86
|
||||
config.MaxConsolidationsRequestsPerPayload = 87
|
||||
config.MaxAttesterSlashingsElectra = 88
|
||||
@@ -164,6 +163,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.KzgCommitmentInclusionProofDepth = 101
|
||||
config.BlobsidecarSubnetCount = 102
|
||||
config.BlobsidecarSubnetCountElectra = 103
|
||||
config.SyncMessageDueBPS = 104
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -201,8 +201,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]interface{})
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, 176, len(data))
|
||||
assert.Equal(t, 171, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -240,8 +239,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "14", v)
|
||||
case "RANDOM_SUBNETS_PER_VALIDATOR":
|
||||
assert.Equal(t, "15", v)
|
||||
case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
|
||||
assert.Equal(t, "16", v)
|
||||
case "SECONDS_PER_ETH1_BLOCK":
|
||||
assert.Equal(t, "17", v)
|
||||
case "DEPOSIT_CHAIN_ID":
|
||||
@@ -438,8 +435,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "16777216", v)
|
||||
case "PROPOSER_SCORE_BOOST":
|
||||
assert.Equal(t, "40", v)
|
||||
case "INTERVALS_PER_SLOT":
|
||||
assert.Equal(t, "3", v)
|
||||
case "MAX_WITHDRAWALS_PER_PAYLOAD":
|
||||
assert.Equal(t, "74", v)
|
||||
case "MAX_BLS_TO_EXECUTION_CHANGES":
|
||||
@@ -456,9 +451,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "8", v)
|
||||
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
|
||||
assert.Equal(t, "128", v)
|
||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||
case "NODE_ID_BITS":
|
||||
assert.Equal(t, "256", v)
|
||||
case "ATTESTATION_SUBNET_EXTRA_BITS":
|
||||
assert.Equal(t, "0", v)
|
||||
case "ATTESTATION_SUBNET_PREFIX_BITS":
|
||||
@@ -521,8 +513,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "83", v)
|
||||
case "PENDING_CONSOLIDATIONS_LIMIT":
|
||||
assert.Equal(t, "84", v)
|
||||
case "MAX_PARTIAL_WITHDRAWALS_PER_PAYLOAD":
|
||||
assert.Equal(t, "85", v)
|
||||
case "FULL_EXIT_REQUEST_AMOUNT":
|
||||
assert.Equal(t, "86", v)
|
||||
case "MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD":
|
||||
@@ -541,8 +531,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "93", v)
|
||||
case "MAX_PENDING_DEPOSITS_PER_EPOCH":
|
||||
assert.Equal(t, "94", v)
|
||||
case "TARGET_BLOBS_PER_BLOCK_ELECTRA":
|
||||
assert.Equal(t, "6", v)
|
||||
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
|
||||
assert.Equal(t, "9", v)
|
||||
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
|
||||
@@ -573,12 +561,12 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "100", v)
|
||||
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
|
||||
assert.Equal(t, "101", v)
|
||||
case "MAX_BLOBS_PER_BLOCK_FULU":
|
||||
assert.Equal(t, "12", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT":
|
||||
assert.Equal(t, "102", v)
|
||||
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
|
||||
assert.Equal(t, "103", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "104", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
// BLOB_SCHEDULE should be an empty slice when no schedule is defined
|
||||
blobSchedule, ok := v.([]interface{})
|
||||
@@ -692,6 +680,27 @@ func TestGetSpec_BlobSchedule(t *testing.T) {
|
||||
// Check second entry - values should be strings for consistent API output
|
||||
assert.Equal(t, "200", blobSchedule[1]["EPOCH"])
|
||||
assert.Equal(t, "9", blobSchedule[1]["MAX_BLOBS_PER_BLOCK"])
|
||||
|
||||
// Verify that fields with json:"-" are NOT present in the blob schedule entries
|
||||
for i, entry := range blobSchedule {
|
||||
t.Run(fmt.Sprintf("entry_%d_omits_json_dash_fields", i), func(t *testing.T) {
|
||||
// These fields have `json:"-"` in NetworkScheduleEntry and should be omitted
|
||||
_, hasForkVersion := entry["ForkVersion"]
|
||||
assert.Equal(t, false, hasForkVersion, "ForkVersion should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasForkDigest := entry["ForkDigest"]
|
||||
assert.Equal(t, false, hasForkDigest, "ForkDigest should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasBPOEpoch := entry["BPOEpoch"]
|
||||
assert.Equal(t, false, hasBPOEpoch, "BPOEpoch should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasVersionEnum := entry["VersionEnum"]
|
||||
assert.Equal(t, false, hasVersionEnum, "VersionEnum should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasIsFork := entry["isFork"]
|
||||
assert.Equal(t, false, hasIsFork, "isFork should be omitted due to json:\"-\"")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
|
||||
|
||||
@@ -68,7 +68,11 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
exitInfo := validators.ExitInformation(st)
|
||||
var exitInfo *validators.ExitInfo
|
||||
if len(blk.Body().ProposerSlashings()) > 0 || len(blk.Body().AttesterSlashings()) > 0 {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = validators.ExitInformation(st)
|
||||
}
|
||||
st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, &httputil.DefaultJsonError{
|
||||
|
||||
@@ -690,6 +690,10 @@ func (s *Server) ProduceSyncCommitteeContribution(w http.ResponseWriter, r *http
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if index >= params.BeaconConfig().SyncCommitteeSubnetCount {
|
||||
httputil.HandleError(w, fmt.Sprintf("Subcommittee index needs to be between 0 and %d, %d is outside of this range.", params.BeaconConfig().SyncCommitteeSubnetCount-1, index), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_, slot, ok := shared.UintFromQuery(w, r, "slot", true)
|
||||
if !ok {
|
||||
return
|
||||
|
||||
@@ -2117,6 +2117,27 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
|
||||
server.ProduceSyncCommitteeContribution(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
})
|
||||
t.Run("invalid subcommittee_index", func(t *testing.T) {
|
||||
url := "http://example.com?slot=1&subcommittee_index=10&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
// Use non-optimistic server for this test
|
||||
server := Server{
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mockChain.ChainService{
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
OptimisticModeFetcher: &mockChain.ChainService{}, // Optimistic: false by default
|
||||
}
|
||||
|
||||
server.ProduceSyncCommitteeContribution(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
require.ErrorContains(t, "Subcommittee index needs to be between 0 and 3, 10 is outside of this range.", errors.New(writer.Body.String()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_RegisterValidator(t *testing.T) {
|
||||
|
||||
@@ -3,7 +3,6 @@ package lookup
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
@@ -284,14 +283,9 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.
|
||||
return make([]*blocks.VerifiedROBlob, 0), nil
|
||||
}
|
||||
|
||||
// Compute the first Fulu slot.
|
||||
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
|
||||
fuluForkSlot := primitives.Slot(math.MaxUint64)
|
||||
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
|
||||
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
|
||||
}
|
||||
fuluForkSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
|
||||
}
|
||||
|
||||
// Convert versioned hashes to indices if provided
|
||||
|
||||
@@ -190,7 +190,7 @@ func TestBlobsErrorHandling(t *testing.T) {
|
||||
|
||||
t.Run("non-existent block by slot returns 404", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: db,
|
||||
BeaconDB: db,
|
||||
ChainInfoFetcher: &mockChain.ChainService{},
|
||||
}
|
||||
|
||||
@@ -275,39 +275,19 @@ func TestBlobsErrorHandling(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetBlob(t *testing.T) {
|
||||
const (
|
||||
slot = 123
|
||||
blobCount = 4
|
||||
denebForEpoch = 1
|
||||
fuluForkEpoch = 2
|
||||
)
|
||||
|
||||
setupDeneb := func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = denebForEpoch
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
|
||||
setupFulu := func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = denebForEpoch
|
||||
cfg.FuluForkEpoch = fuluForkEpoch
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
|
||||
const blobCount = 4
|
||||
ctx := t.Context()
|
||||
db := testDB.SetupDB(t)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().DenebForkEpoch + 4096*2
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
db := testDB.SetupDB(t)
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Create and save Deneb block and blob sidecars.
|
||||
_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
|
||||
|
||||
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
|
||||
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, ds, blobCount, util.WithDenebSlot(ds))
|
||||
denebBlockRoot := denebBlock.Root()
|
||||
|
||||
verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
|
||||
@@ -316,13 +296,14 @@ func TestGetBlob(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = db.SaveBlock(t.Context(), denebBlock)
|
||||
err := db.SaveBlock(t.Context(), denebBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create Electra block and blob sidecars. (Electra block = Fulu block),
|
||||
// save the block, convert blob sidecars to data column sidecars and save the block.
|
||||
fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
|
||||
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
|
||||
dsStr := fmt.Sprintf("%d", ds)
|
||||
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, blobCount)
|
||||
fuluBlockRoot := fuluBlock.Root()
|
||||
|
||||
cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
|
||||
@@ -347,8 +328,6 @@ func TestGetBlob(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("genesis", func(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{}
|
||||
_, rpcErr := blocker.Blobs(ctx, "genesis")
|
||||
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
|
||||
@@ -356,8 +335,6 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("head", func(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
Root: denebBlockRoot[:],
|
||||
@@ -388,8 +365,6 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ðpb.Checkpoint{Root: denebBlockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
@@ -405,8 +380,6 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("justified", func(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: denebBlockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
@@ -422,8 +395,6 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("root", func(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
@@ -438,8 +409,6 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
@@ -449,7 +418,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, blobCount, len(verifiedBlobs))
|
||||
})
|
||||
@@ -457,8 +426,6 @@ func TestGetBlob(t *testing.T) {
|
||||
t.Run("one blob only", func(t *testing.T) {
|
||||
const index = 2
|
||||
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -468,7 +435,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}

retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{index}))
require.IsNil(t, rpcErr)
require.Equal(t, 1, len(retrievedVerifiedSidecars))

@@ -483,8 +450,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("no blobs returns an empty array", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -494,14 +459,12 @@ func TestGetBlob(t *testing.T) {
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
}

verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(verifiedBlobs))
})

t.Run("no blob at index", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -512,14 +475,12 @@ func TestGetBlob(t *testing.T) {
}

noBlobIndex := len(storedBlobSidecars) + 1
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, noBlobIndex}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, noBlobIndex}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})

t.Run("index too big", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -528,14 +489,12 @@ func TestGetBlob(t *testing.T) {
BeaconDB: db,
BlobStorage: blobStorage,
}
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, math.MaxInt}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, math.MaxInt}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
})

t.Run("not enough stored data column sidecars", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
require.NoError(t, err)
@@ -555,8 +514,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("reconstruction needed", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
require.NoError(t, err)
@@ -582,8 +539,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("no reconstruction needed", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

@@ -1,6 +1,8 @@
package validator

import (
"time"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
@@ -53,51 +55,100 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
//
// StreamSlots sends the block's slot and dependent roots to clients every single time a block is received by the beacon node.
func (vs *Server) StreamSlots(req *ethpb.StreamSlotsRequest, stream ethpb.BeaconNodeValidator_StreamSlotsServer) error {
ch := make(chan *feed.Event, 1)
bufchan := make(chan *feed.Event)
errchan := make(chan error)
defer func() {
select {
case err := <-errchan:
log.WithError(err).Debug("error from sending goroutine after StreamSlots timeout")
default:
return
}
}() // drain errchan to make sure the goroutine can't get stuck

go func() {
for {
select {
case ev := <-bufchan:
if ev == nil {
// channel closed
return
}
var s primitives.Slot
var currDependentRoot, prevDependentRoot [32]byte
if req.VerifiedOnly {
if ev.Type != statefeed.BlockProcessed {
continue
}
data, ok := ev.Data.(*statefeed.BlockProcessedData)
if !ok || data == nil {
continue
}
s = data.Slot
currDependentRoot = data.CurrDependentRoot
prevDependentRoot = data.PrevDependentRoot
} else {
if ev.Type != blockfeed.ReceivedBlock {
continue
}
data, ok := ev.Data.(*blockfeed.ReceivedBlockData)
if !ok || data == nil {
continue
}
s = data.SignedBlock.Block().Slot()
currDependentRoot = data.CurrDependentRoot
prevDependentRoot = data.PrevDependentRoot
}
if err := stream.Send(
&ethpb.StreamSlotsResponse{
Slot: s,
PreviousDutyDependentRoot: prevDependentRoot[:],
CurrentDutyDependentRoot: currDependentRoot[:],
}); err != nil {
errchan <- status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
return
}
}
}
}()

subchan := make(chan *feed.Event, 1)
var sub event.Subscription
if req.VerifiedOnly {
sub = vs.StateNotifier.StateFeed().Subscribe(ch)
sub = vs.StateNotifier.StateFeed().Subscribe(subchan)
} else {
sub = vs.BlockNotifier.BlockFeed().Subscribe(ch)
sub = vs.BlockNotifier.BlockFeed().Subscribe(subchan)
}
defer func() {
// drain the subchan because non-blocking send and unsubscribe can be racy
for {
select {
case ev := <-subchan:
if ev == nil {
return
}
default:
return
}
}
}()
defer close(bufchan)
defer sub.Unsubscribe()

for {
select {
case ev := <-ch:
var s primitives.Slot
var currDependentRoot, prevDependentRoot [32]byte
if req.VerifiedOnly {
if ev.Type != statefeed.BlockProcessed {
continue
}
data, ok := ev.Data.(*statefeed.BlockProcessedData)
if !ok || data == nil {
continue
}
s = data.Slot
currDependentRoot = data.CurrDependentRoot
prevDependentRoot = data.PrevDependentRoot
} else {
if ev.Type != blockfeed.ReceivedBlock {
continue
}
data, ok := ev.Data.(*blockfeed.ReceivedBlockData)
if !ok || data == nil {
continue
}
s = data.SignedBlock.Block().Slot()
currDependentRoot = data.CurrDependentRoot
prevDependentRoot = data.PrevDependentRoot
}
if err := stream.Send(
&ethpb.StreamSlotsResponse{
Slot: s,
PreviousDutyDependentRoot: prevDependentRoot[:],
CurrentDutyDependentRoot: currDependentRoot[:],
}); err != nil {
return status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
case ev := <-subchan:
select {
// This select implements a non-blocking channel send with a timeout to prevent blocking the subscription
case bufchan <- ev:
continue
case <-time.After(time.Second):
return status.Error(codes.ResourceExhausted, "Could not keep up with block events, exiting stream")
case <-vs.Ctx.Done():
return status.Error(codes.Canceled, "Context canceled")
}
case err := <-errchan:
return err
case <-sub.Err():
return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine")
case <-vs.Ctx.Done():
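The hunk above decouples the gRPC send from the event subscription: events are relayed from the subscription channel to a dedicated sender goroutine through a handoff channel, and the relay gives up after one second so a slow client cannot block the feed. A minimal, self-contained sketch of that handoff pattern follows; it uses only the standard library, and the event and send types are stand-ins rather than Prysm's.

package main

import (
	"errors"
	"fmt"
	"time"
)

// relay forwards events from sub to a sender goroutine via buf,
// returning an error if the consumer cannot keep up within timeout.
func relay(sub <-chan int, done <-chan struct{}, send func(int) error, timeout time.Duration) error {
	buf := make(chan int)
	errc := make(chan error, 1)

	go func() {
		for ev := range buf {
			if err := send(ev); err != nil {
				errc <- err
				return
			}
		}
	}()
	defer close(buf)

	for {
		select {
		case ev := <-sub:
			select {
			case buf <- ev: // hand off without blocking the subscription for long
			case <-time.After(timeout):
				return errors.New("consumer too slow, dropping stream")
			case <-done:
				return nil
			}
		case err := <-errc:
			return err
		case <-done:
			return nil
		}
	}
}

func main() {
	sub := make(chan int, 1)
	done := make(chan struct{})
	sub <- 42
	send := func(ev int) error {
		fmt.Println("sent", ev)
		close(done)
		return nil
	}
	if err := relay(sub, done, send, time.Second); err != nil {
		fmt.Println(err)
	}
}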
@@ -17,13 +17,6 @@ import (
"google.golang.org/grpc/status"
)

const (
// validatorLookupThreshold determines when to use full assignment map vs cached linear search.
// For requests with fewer validators, we use cached linear search to avoid the overhead
// of building a complete assignment map for all validators in the epoch.
validatorLookupThreshold = 3000
)

// GetDutiesV2 returns the duties assigned to a list of validators specified
// in the request object.
//
@@ -60,7 +53,26 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
span.SetAttributes(trace.Int64Attribute("num_pubkeys", int64(len(req.PublicKeys))))
defer span.End()

meta, err := loadDutiesMetadata(ctx, s, req.Epoch, len(req.PublicKeys))
// Collect validator indices from public keys and cache the lookups
type validatorInfo struct {
index primitives.ValidatorIndex
found bool
}
validatorLookup := make(map[string]validatorInfo, len(req.PublicKeys))
requestIndices := make([]primitives.ValidatorIndex, 0, len(req.PublicKeys))

for _, pubKey := range req.PublicKeys {
key := string(pubKey)
if _, exists := validatorLookup[key]; !exists {
idx, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
validatorLookup[key] = validatorInfo{index: idx, found: ok}
if ok {
requestIndices = append(requestIndices, idx)
}
}
}

meta, err := loadDutiesMetadata(ctx, s, req.Epoch, requestIndices)
if err != nil {
return nil, err
}
@@ -68,14 +80,14 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
validatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
nextValidatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))

// start loop for assignments for current and next epochs
// Build duties using cached validator index lookups
for _, pubKey := range req.PublicKeys {
if ctx.Err() != nil {
return nil, status.Errorf(codes.Aborted, "Could not continue fetching assignments: %v", ctx.Err())
}

validatorIndex, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
info := validatorLookup[string(pubKey)]
if !info.found {
unknownDuty := &ethpb.DutiesV2Response_Duty{
PublicKey: pubKey,
Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
@@ -85,16 +97,15 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
continue
}

meta.current.liteAssignment = vs.getValidatorAssignment(meta.current, validatorIndex)
currentAssignment := vs.getValidatorAssignment(meta.current, info.index)
nextAssignment := vs.getValidatorAssignment(meta.next, info.index)

meta.next.liteAssignment = vs.getValidatorAssignment(meta.next, validatorIndex)

assignment, nextAssignment, err := vs.buildValidatorDuty(pubKey, validatorIndex, s, req.Epoch, meta)
assignment, nextDuty, err := vs.buildValidatorDuty(pubKey, info.index, s, req.Epoch, meta, currentAssignment, nextAssignment)
if err != nil {
return nil, err
}
validatorAssignments = append(validatorAssignments, assignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextDuty)
}

// Dependent roots for fork choice
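The new code above resolves each requested public key to a validator index exactly once, caches the result, and reuses it in the duty-building loop. A compact sketch of that dedup-and-cache step follows; the lookup function and key type are stand-ins, not Prysm's API.

package main

import "fmt"

type valInfo struct {
	index uint64
	found bool
}

// resolveOnce resolves every pubkey through lookup exactly once, even if the
// request repeats keys, and returns the found indices in request order.
func resolveOnce(pubkeys []string, lookup func(string) (uint64, bool)) (map[string]valInfo, []uint64) {
	cache := make(map[string]valInfo, len(pubkeys))
	indices := make([]uint64, 0, len(pubkeys))
	for _, pk := range pubkeys {
		if _, seen := cache[pk]; seen {
			continue
		}
		idx, ok := lookup(pk)
		cache[pk] = valInfo{index: idx, found: ok}
		if ok {
			indices = append(indices, idx)
		}
	}
	return cache, indices
}

func main() {
	lookup := func(pk string) (uint64, bool) { return uint64(len(pk)), pk != "unknown" }
	cache, indices := resolveOnce([]string{"aa", "bb", "aa", "unknown"}, lookup)
	fmt.Println(len(cache), indices) // 3 [2 2]
}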
@@ -147,18 +158,15 @@ type dutiesMetadata struct {
}

type metadata struct {
committeesAtSlot uint64
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
startSlot primitives.Slot
committeesBySlot [][][]primitives.ValidatorIndex
validatorAssignmentMap map[primitives.ValidatorIndex]*helpers.LiteAssignment
liteAssignment *helpers.LiteAssignment
committeesAtSlot uint64
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
committeeAssignments map[primitives.ValidatorIndex]*helpers.CommitteeAssignment
}

func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*dutiesMetadata, error) {
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*dutiesMetadata, error) {
meta := &dutiesMetadata{}
var err error
meta.current, err = loadMetadata(ctx, s, reqEpoch, numValidators)
meta.current, err = loadMetadata(ctx, s, reqEpoch, requestIndices)
if err != nil {
return nil, err
}
@@ -168,14 +176,14 @@ func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primi
return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
}

meta.next, err = loadMetadata(ctx, s, reqEpoch+1, numValidators)
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, requestIndices)
if err != nil {
return nil, err
}
return meta, nil
}

func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*metadata, error) {
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*metadata, error) {
meta := &metadata{}

if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {
@@ -188,56 +196,36 @@ func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.
}
meta.committeesAtSlot = helpers.SlotCommitteeCount(activeValidatorCount)

meta.startSlot, err = slots.EpochStart(reqEpoch)
// Use CommitteeAssignments which only computes committees for requested validators
meta.committeeAssignments, err = helpers.CommitteeAssignments(ctx, s, reqEpoch, requestIndices)
if err != nil {
return nil, err
}

meta.committeesBySlot, err = helpers.PrecomputeCommittees(ctx, s, meta.startSlot)
if err != nil {
return nil, err
}

if numValidators >= validatorLookupThreshold {
meta.validatorAssignmentMap = buildValidatorAssignmentMap(meta.committeesBySlot, meta.startSlot)
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}

return meta, nil
}

// buildValidatorAssignmentMap creates a map from validator index to assignment for O(1) lookup.
func buildValidatorAssignmentMap(
bySlot [][][]primitives.ValidatorIndex,
startSlot primitives.Slot,
) map[primitives.ValidatorIndex]*helpers.LiteAssignment {
validatorToAssignment := make(map[primitives.ValidatorIndex]*helpers.LiteAssignment)

for relativeSlot, committees := range bySlot {
for cIdx, committee := range committees {
for pos, vIdx := range committee {
validatorToAssignment[vIdx] = &helpers.LiteAssignment{
AttesterSlot: startSlot + primitives.Slot(relativeSlot),
CommitteeIndex: primitives.CommitteeIndex(cIdx),
CommitteeLength: uint64(len(committee)),
ValidatorCommitteeIndex: uint64(pos),
}
}
// findValidatorIndexInCommittee finds the position of a validator in a committee.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
for i, vIdx := range committee {
if vIdx == validatorIndex {
return uint64(i)
}
}
return validatorToAssignment
return 0
}

// getValidatorAssignment retrieves the assignment for a validator using either
// the pre-built assignment map (for large requests) or linear search (for small requests).
// getValidatorAssignment retrieves the assignment for a validator from CommitteeAssignments.
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
if meta.validatorAssignmentMap != nil {
if assignment, exists := meta.validatorAssignmentMap[validatorIndex]; exists {
return assignment
if assignment, exists := meta.committeeAssignments[validatorIndex]; exists {
return &helpers.LiteAssignment{
AttesterSlot: assignment.AttesterSlot,
CommitteeIndex: assignment.CommitteeIndex,
CommitteeLength: uint64(len(assignment.Committee)),
ValidatorCommitteeIndex: findValidatorIndexInCommittee(assignment.Committee, validatorIndex),
}
return &helpers.LiteAssignment{}
}

return helpers.AssignmentForValidator(meta.committeesBySlot, meta.startSlot, validatorIndex)
return &helpers.LiteAssignment{}
}
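getValidatorAssignment above now reads straight from the per-validator CommitteeAssignments map and derives the validator's position inside its committee with a linear scan. A self-contained sketch of that conversion follows, using local stand-in types rather than the helpers package.

package main

import "fmt"

type committeeAssignment struct {
	committee      []uint64
	attesterSlot   uint64
	committeeIndex uint64
}

type liteAssignment struct {
	attesterSlot, committeeIndex, committeeLength, positionInCommittee uint64
}

func positionOf(committee []uint64, v uint64) uint64 {
	for i, idx := range committee {
		if idx == v {
			return uint64(i)
		}
	}
	return 0
}

// toLite returns a zero-value assignment when the validator has no duty in the epoch.
func toLite(assignments map[uint64]*committeeAssignment, v uint64) liteAssignment {
	a, ok := assignments[v]
	if !ok {
		return liteAssignment{}
	}
	return liteAssignment{
		attesterSlot:        a.attesterSlot,
		committeeIndex:      a.committeeIndex,
		committeeLength:     uint64(len(a.committee)),
		positionInCommittee: positionOf(a.committee, v),
	}
}

func main() {
	m := map[uint64]*committeeAssignment{5: {committee: []uint64{4, 5, 6}, attesterSlot: 33, committeeIndex: 1}}
	fmt.Println(toLite(m, 5), toLite(m, 9))
}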
// buildValidatorDuty builds both current‑epoch and next‑epoch V2 duty objects
@@ -248,21 +236,23 @@ func (vs *Server) buildValidatorDuty(
s state.BeaconState,
reqEpoch primitives.Epoch,
meta *dutiesMetadata,
currentAssignment *helpers.LiteAssignment,
nextAssignment *helpers.LiteAssignment,
) (*ethpb.DutiesV2Response_Duty, *ethpb.DutiesV2Response_Duty, error) {
assignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
nextAssignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
nextDuty := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}

statusEnum := assignmentStatus(s, idx)
assignment.ValidatorIndex = idx
assignment.Status = statusEnum
assignment.CommitteesAtSlot = meta.current.committeesAtSlot
assignment.ProposerSlots = meta.current.proposalSlots[idx]
populateCommitteeFields(assignment, meta.current.liteAssignment)
populateCommitteeFields(assignment, currentAssignment)

nextAssignment.ValidatorIndex = idx
nextAssignment.Status = statusEnum
nextAssignment.CommitteesAtSlot = meta.next.committeesAtSlot
populateCommitteeFields(nextAssignment, meta.next.liteAssignment)
nextDuty.ValidatorIndex = idx
nextDuty.Status = statusEnum
nextDuty.CommitteesAtSlot = meta.next.committeesAtSlot
populateCommitteeFields(nextDuty, nextAssignment)

// Sync committee flags
if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
@@ -271,7 +261,7 @@ func (vs *Server) buildValidatorDuty(
return nil, nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
}
assignment.IsSyncCommittee = inSync
nextAssignment.IsSyncCommittee = inSync
nextDuty.IsSyncCommittee = inSync
if inSync {
if err := core.RegisterSyncSubnetCurrentPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
@@ -290,18 +280,16 @@ func (vs *Server) buildValidatorDuty(
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
}
nextAssignment.IsSyncCommittee = nextInSync
nextDuty.IsSyncCommittee = nextInSync
if nextInSync {
go func() {
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
}()
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
}
}
}

return assignment, nextAssignment, nil
return assignment, nextDuty, nil
}

func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {

@@ -560,105 +560,20 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
assert.ErrorContains(t, "Syncing to latest head", err)
}

func TestBuildValidatorAssignmentMap(t *testing.T) {
start := primitives.Slot(200)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}}, // slot 200, committee 0
{{7, 8, 9}}, // slot 201, committee 0
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
}

assignmentMap := buildValidatorAssignmentMap(bySlot, start)

// Test validator 8 assignment (slot 201, committee 0, position 1)
vIdx := primitives.ValidatorIndex(8)
got, exists := assignmentMap[vIdx]
assert.Equal(t, true, exists)
require.NotNil(t, got)
assert.Equal(t, start+1, got.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got.CommitteeIndex)
assert.Equal(t, uint64(3), got.CommitteeLength)
assert.Equal(t, uint64(1), got.ValidatorCommitteeIndex)

// Test validator 1 assignment (slot 200, committee 0, position 0)
vIdx1 := primitives.ValidatorIndex(1)
got1, exists1 := assignmentMap[vIdx1]
assert.Equal(t, true, exists1)
require.NotNil(t, got1)
assert.Equal(t, start, got1.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got1.CommitteeIndex)
assert.Equal(t, uint64(3), got1.CommitteeLength)
assert.Equal(t, uint64(0), got1.ValidatorCommitteeIndex)

// Test validator 10 assignment (slot 202, committee 1, position 0)
vIdx10 := primitives.ValidatorIndex(10)
got10, exists10 := assignmentMap[vIdx10]
assert.Equal(t, true, exists10)
require.NotNil(t, got10)
assert.Equal(t, start+2, got10.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(1), got10.CommitteeIndex)
assert.Equal(t, uint64(2), got10.CommitteeLength)
assert.Equal(t, uint64(0), got10.ValidatorCommitteeIndex)

// Test non-existent validator
_, exists99 := assignmentMap[primitives.ValidatorIndex(99)]
assert.Equal(t, false, exists99)

// Verify that we get the same results as the linear search
for _, committees := range bySlot {
for _, committee := range committees {
for _, validatorIdx := range committee {
linearResult := helpers.AssignmentForValidator(bySlot, start, validatorIdx)
mapResult, mapExists := assignmentMap[validatorIdx]
assert.Equal(t, true, mapExists)
require.DeepEqual(t, linearResult, mapResult)
}
}
}
}

func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
func TestGetValidatorAssignment(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},

// Test using CommitteeAssignments
committeeAssignments := map[primitives.ValidatorIndex]*helpers.CommitteeAssignment{
5: {
Committee: []primitives.ValidatorIndex{4, 5, 6},
AttesterSlot: start + 1,
CommitteeIndex: primitives.CommitteeIndex(0),
},
}

// Test with pre-built assignment map (large request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: buildValidatorAssignmentMap(bySlot, start),
}

vs := &Server{}

// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
require.NotNil(t, assignment)
assert.Equal(t, start, assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)

// Test non-existent validator should return empty assignment
assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
require.NotNil(t, assignment)
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}

func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
}

// Test without assignment map (small request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: nil, // No map - should use linear search
committeeAssignments: committeeAssignments,
}

vs := &Server{}
@@ -676,53 +591,3 @@ func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}

func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
state, _ := util.DeterministicGenesisState(t, 128)
epoch := primitives.Epoch(0)

tests := []struct {
name string
numValidators int
expectAssignmentMap bool
}{
{
name: "Small request - below threshold",
numValidators: 100,
expectAssignmentMap: false,
},
{
name: "Large request - at threshold",
numValidators: validatorLookupThreshold,
expectAssignmentMap: true,
},
{
name: "Large request - above threshold",
numValidators: validatorLookupThreshold + 1000,
expectAssignmentMap: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
meta, err := loadMetadata(t.Context(), state, epoch, tt.numValidators)
require.NoError(t, err)
require.NotNil(t, meta)

if tt.expectAssignmentMap {
require.NotNil(t, meta.validatorAssignmentMap, "Expected assignment map to be built for large requests")
assert.Equal(t, true, len(meta.validatorAssignmentMap) > 0, "Assignment map should not be empty")
} else {
// For small requests, the map should be nil (not initialized)
if meta.validatorAssignmentMap != nil {
t.Errorf("Expected no assignment map for small requests, got: %v", meta.validatorAssignmentMap)
}
}

// Common fields should always be set
assert.Equal(t, true, meta.committeesAtSlot > 0)
require.NotNil(t, meta.committeesBySlot)
assert.Equal(t, true, len(meta.committeesBySlot) > 0)
})
}
}

@@ -352,7 +352,7 @@ func (vs *Server) broadcastAndReceiveSidecars(
dataColumnSidecars []blocks.RODataColumn,
) error {
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, root); err != nil {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
@@ -495,43 +495,22 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
}

// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(
ctx context.Context,
roSidecars []blocks.RODataColumn,
root [fieldparams.RootLength]byte,
) error {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
eg, _ := errgroup.WithContext(ctx)
for _, roSidecar := range roSidecars {
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)

eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(roSidecar.Index)

if err := vs.P2P.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "broadcast data column")
}

return nil
})
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, sidecar := range roSidecars {
verifiedSidecar := blocks.NewVerifiedRODataColumn(sidecar)
verifiedSidecars = append(verifiedSidecars, verifiedSidecar)
}

if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
// Broadcast sidecars (non blocking).
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
return errors.Wrap(err, "broadcast data column sidecars")
}

for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}

if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
// In parallel, receive sidecars.
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedSidecars); err != nil {
return errors.Wrap(err, "receive data columns")
}

return nil
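The rewritten broadcastAndReceiveDataColumns marks locally built sidecars as verified, hands the whole batch to a single broadcast call, and then passes the same batch to the receiver. A minimal sketch of that upgrade-then-fan-out flow follows; the broadcaster and receiver interfaces are stand-ins, not Prysm's P2P API.

package main

import (
	"context"
	"fmt"
)

type sidecar struct{ index uint64 }
type verifiedSidecar struct{ sidecar }

type broadcaster interface {
	BroadcastAll(ctx context.Context, scs []verifiedSidecar) error
}
type receiver interface {
	Receive(scs []verifiedSidecar) error
}

func broadcastAndReceive(ctx context.Context, b broadcaster, r receiver, scs []sidecar) error {
	// Locally produced sidecars can be promoted to verified without re-checking proofs.
	verified := make([]verifiedSidecar, 0, len(scs))
	for _, sc := range scs {
		verified = append(verified, verifiedSidecar{sc})
	}
	if err := b.BroadcastAll(ctx, verified); err != nil {
		return fmt.Errorf("broadcast: %w", err)
	}
	return r.Receive(verified)
}

type noop struct{}

func (noop) BroadcastAll(context.Context, []verifiedSidecar) error { return nil }

func (noop) Receive(scs []verifiedSidecar) error {
	fmt.Println("received", len(scs))
	return nil
}

func main() {
	_ = broadcastAndReceive(context.Background(), noop{}, noop{}, []sidecar{{0}, {1}})
}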
@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -143,7 +142,7 @@ func (vs *Server) deposits(
if err != nil {
return nil, errors.Wrap(err, "could not retrieve requests start index")
}
eth1DepositIndexLimit := math.Min(canonicalEth1Data.DepositCount, requestsStartIndex)
eth1DepositIndexLimit := min(canonicalEth1Data.DepositCount, requestsStartIndex)
if beaconState.Eth1DepositIndex() < eth1DepositIndexLimit {
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < eth1DepositIndexLimit {
pendingDeps = append(pendingDeps, dep)
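This hunk, like the withdrawals and stategen changes further down, swaps the project's math helpers for Go 1.21's built-in generic min and max. For reference, a tiny standalone example of the builtins:

package main

import "fmt"

func main() {
	depositCount, requestsStartIndex := uint64(7), uint64(5)
	limit := min(depositCount, requestsStartIndex) // built-in generic min, Go 1.21+
	fmt.Println(limit)                             // 5
	fmt.Println(max(3.0, 1.0))                     // 3
}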
@@ -12,13 +12,18 @@ import (

func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*ethpb.ProposerSlashing, []ethpb.AttSlashing) {
var err error

proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
if len(proposerSlashings) == 0 && len(attSlashings) == 0 {
return validProposerSlashings, validAttSlashings
}
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo := v.ExitInformation(head)
if err := helpers.UpdateTotalActiveBalanceCache(head, exitInfo.TotalActiveBalance); err != nil {
log.WithError(err).Warn("Could not update total active balance cache")
}
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
for _, slashing := range proposerSlashings {
_, err = blocks.ProcessProposerSlashing(ctx, head, slashing, exitInfo)
if err != nil {
@@ -27,8 +32,6 @@ func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*
}
validProposerSlashings = append(validProposerSlashings, slashing)
}
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
for _, slashing := range attSlashings {
_, err = blocks.ProcessAttesterSlashing(ctx, head, slashing, exitInfo)
if err != nil {
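The reordering in the getSlashings hunk exists so the expensive exit-information computation only runs when the pools actually contain slashings. The guard pattern, reduced to its core (the pool contents and the exit-info function here are placeholders, not Prysm's types):

package main

import "fmt"

func validSlashings(proposer, attester []string, computeExitInfo func() string) ([]string, []string) {
	if len(proposer) == 0 && len(attester) == 0 {
		return nil, nil // skip the expensive computation entirely
	}
	info := computeExitInfo() // computed at most once per call
	fmt.Println("using", info)
	return proposer, attester
}

func main() {
	validSlashings(nil, nil, func() string { panic("should not run") })
	validSlashings([]string{"s1"}, nil, func() string { return "exit info" })
}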
@@ -74,6 +74,18 @@ func WithTimeAsNow(t time.Time) ClockOpt {
}
}

func WithSlotAsNow(s types.Slot) ClockOpt {
return func(g *Clock) {
g.now = func() time.Time {
t, err := slots.StartTime(g.t, s)
if err != nil {
panic(err) // lint:nopanic -- This is a programming error if genesis/slot are invalid.
}
return t
}
}
}

// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.
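WithSlotAsNow pins the clock's notion of "now" to the start time of a given slot, which the reworked sync tests below use instead of hand-computed epochs. A usage-style sketch of the same functional-option pattern on a simplified clock (not the startup package itself; slot duration is assumed to be 12 seconds for the example):

package main

import (
	"fmt"
	"time"
)

type clock struct {
	genesis time.Time
	now     func() time.Time
}

type clockOpt func(*clock)

const secondsPerSlot = 12

// withSlotAsNow makes the clock report the start time of slot s as "now".
func withSlotAsNow(s uint64) clockOpt {
	return func(c *clock) {
		c.now = func() time.Time {
			return c.genesis.Add(time.Duration(s) * secondsPerSlot * time.Second)
		}
	}
}

func newClock(genesis time.Time, opts ...clockOpt) *clock {
	c := &clock{genesis: genesis, now: time.Now}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	c := newClock(time.Unix(0, 0), withSlotAsNow(10))
	fmt.Println(c.now().Unix()) // 120
}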
@@ -160,7 +160,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
}

validatorsLen := b.validatorsLen()
bound := mathutil.Min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
for i := uint64(0); i < bound; i++ {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {

@@ -3,7 +3,6 @@ package stategen
import (
"context"
"fmt"
"math"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -54,7 +53,7 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
defer span.End()

// Duration can't be 0 to prevent panic for division.
duration := uint64(math.Max(float64(s.saveHotStateDB.duration), 1))
duration := uint64(max(float64(s.saveHotStateDB.duration), 1))

s.saveHotStateDB.lock.Lock()
if s.saveHotStateDB.enabled && st.Slot().Mod(duration) == 0 {

@@ -17,6 +17,7 @@ go_library(
"fuzz_exports.go", # keep
"log.go",
"metrics.go",
"once.go",
"options.go",
"pending_attestations_queue.go",
"pending_blocks_queue.go",
@@ -45,7 +46,6 @@ go_library(
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_handlers.go",
"subscriber_light_client.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
"subscription_topic_handler.go",
@@ -113,7 +113,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",
@@ -174,6 +173,8 @@ go_test(
"error_test.go",
"fork_watcher_test.go",
"kzg_batch_verifier_test.go",
"once_test.go",
"pending_attestations_queue_bucket_test.go",
"pending_attestations_queue_test.go",
"pending_blocks_queue_test.go",
"rate_limiter_test.go",

@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -24,8 +25,9 @@ func testBlobGen(t *testing.T, start primitives.Slot, n int) ([]blocks.ROBlock,
}

func TestValidateNext_happy(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 4)
startSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := startSlot + 65
blks, blobs := testBlobGen(t, startSlot, 4)
cfg := &blobSyncConfig{
retentionStart: 0,
nbv: testNewBlobVerifier(),
@@ -74,8 +76,9 @@ func TestValidateNext_sigMatch(t *testing.T) {
}

func TestValidateNext_errorsFromVerifier(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 1)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := primitives.Slot(ds + 96)
blks, blobs := testBlobGen(t, ds+31, 1)
cases := []struct {
name string
err error

@@ -2,6 +2,7 @@ package sync

import (
"encoding/binary"
"io"
"math"
"math/big"
"testing"
@@ -18,9 +19,11 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -165,23 +168,12 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
require.Equal(t, rob.Index, r.sidecar.Index)
}

func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func()) {
cfg := params.BeaconConfig()
copiedCfg := cfg.Copy()
repositionFutureEpochs(copiedCfg)
copiedCfg.InitializeForkSchedule()
params.OverrideBeaconConfig(copiedCfg)
cleanup := func() {
params.OverrideBeaconConfig(cfg)
}
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
chain, clock := defaultMockChain(t)
func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob) {
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch))
chain := defaultMockChain(t, c.clock.CurrentEpoch())
if c.chain == nil {
c.chain = chain
}
if c.clock == nil {
c.clock = clock
}
d := db.SetupDB(t)

sidecars := make([]blocks.ROBlob, 0)
@@ -208,16 +200,16 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())

client := p2ptest.NewTestP2P(t)
s := &Service{
cfg: &config{p2p: client, chain: c.chain, clock: clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
cfg: &config{p2p: client, chain: c.chain, clock: c.clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
rateLimiter: newRateLimiter(client),
}

byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV1, leakybucket.NewCollector(0.000001, int64(byRootRate), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV1, leakybucket.NewCollector(0.000001, int64(byRangeRate), time.Second, false))

return s, sidecars, cleanup
return s, sidecars
}

func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChunk) func(network.Stream) {
@@ -225,12 +217,16 @@ func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChu
for _, ex := range expect {
ex.requireExpected(t, s, stream)
}

encoding := s.cfg.p2p.Encoding()
_, _, err := ReadStatusCode(stream, encoding)
require.ErrorIs(t, err, io.EOF)
}
}

func (c *blobsTestCase) run(t *testing.T) {
s, sidecars, cleanup := c.setup(t)
defer cleanup()
blobRpcThrottleInterval = time.Microsecond * 1
s, sidecars := c.setup(t)
req := c.requestFromSidecars(sidecars)
expect := c.defineExpected(t, sidecars, req)
m := map[types.Slot][]blocks.ROBlob{}
@@ -266,41 +262,32 @@ func (c *blobsTestCase) run(t *testing.T) {
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
if cfg.FuluForkEpoch == math.MaxUint64 {
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
}
}

func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
de := params.BeaconConfig().DenebForkEpoch
df, err := params.Fork(de)
func defaultMockChain(t *testing.T, current primitives.Epoch) *mock.ChainService {
fe := current - 2
df, err := params.Fork(current)
require.NoError(t, err)
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
fe := ce - 2
cs, err := slots.EpochStart(ce)
require.NoError(t, err)
genesis := time.Now()
mockNow := startup.MockNower{}
clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
mockNow.SetSlot(t, clock, cs)
chain := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
Fork: df,
}

return chain, clock
return chain
}

func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
ctx := t.Context()
nblocks := 10
c := &blobsTestCase{nblocks: nblocks}
c := &blobsTestCase{nblocks: nblocks, clock: startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(ds))}
c.oldestSlot = c.defaultOldestSlotByRoot
s, sidecars, cleanup := c.setup(t)
s, sidecars := c.setup(t)
req := blobRootRequestFromSidecars(sidecars)
expect := c.filterExpectedByRoot(t, sidecars, req)
defer cleanup()
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlock(0)
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
require.Equal(t, maxed, len(sidecars))
require.Equal(t, maxed, len(expect))
for _, sc := range sidecars {

@@ -1,6 +1,7 @@
package sync

import (
"context"
"strings"
"time"

@@ -29,13 +30,13 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
const minimumPeerCount = 1

// Get our actual custody group count.
actualCustodyGrounpCount, err := s.cfg.p2p.CustodyGroupCount()
actualCustodyGrounpCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "p2p custody group count")
}

// Get our target custody group count.
targetCustodyGroupCount, err := s.custodyGroupCount()
targetCustodyGroupCount, err := s.custodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}
@@ -88,7 +89,7 @@ func (s *Service) updateCustodyInfoIfNeeded() error {

// custodyGroupCount computes the custody group count based on the custody requirement,
// the validators custody requirement, and whether the node is subscribed to all data subnets.
func (s *Service) custodyGroupCount() (uint64, error) {
func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
beaconConfig := params.BeaconConfig()

if flags.Get().SubscribeAllDataSubnets {

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
@@ -55,9 +54,9 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {

if withChain {
const headSlot = primitives.Slot(100)
block, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Body: &eth.BeaconBlockBody{},
block, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{},
Slot: headSlot,
},
})
@@ -90,11 +89,13 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
}

func (ts *testSetup) assertCustodyInfo(t *testing.T, expectedSlot primitives.Slot, expectedCount uint64) {
p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot()
ctx := t.Context()

p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot(ctx)
require.NoError(t, err)
require.Equal(t, expectedSlot, p2pEarliestSlot)

p2pCustodyCount, err := ts.p2pService.CustodyGroupCount()
p2pCustodyCount, err := ts.p2pService.CustodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, expectedCount, p2pCustodyCount)

@@ -170,13 +171,15 @@ func TestCustodyGroupCount(t *testing.T) {
config.CustodyRequirement = 3
params.OverrideBeaconConfig(config)

ctx := t.Context()

t.Run("SubscribeAllDataSubnets enabled returns NumberOfCustodyGroups", func(t *testing.T) {
withSubscribeAllDataSubnets(t, func() {
service := &Service{
ctx: context.Background(),
}

result, err := service.custodyGroupCount()
result, err := service.custodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, config.NumberOfCustodyGroups, result)
})
@@ -188,7 +191,7 @@ func TestCustodyGroupCount(t *testing.T) {
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}

result, err := service.custodyGroupCount()
result, err := service.custodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, config.CustodyRequirement, result)
})

@@ -20,7 +20,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
goPeer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
@@ -921,7 +920,7 @@ func buildByRangeRequests(
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
for root, indices := range indicesByRoot {
identifier := &eth.DataColumnsByRootIdentifier{
identifier := &ethpb.DataColumnsByRootIdentifier{
BlockRoot: root[:],
Columns: helpers.SortedSliceFromMap(indices),
}
@@ -929,7 +928,7 @@ func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint6
}

// Sort identifiers to have a deterministic output.
slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
slices.SortFunc(identifiers, func(left, right *ethpb.DataColumnsByRootIdentifier) int {
if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
return cmp
}
@@ -1023,17 +1022,20 @@ func computeIndicesByRootByPeer(
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
for peer := range peers {
log := log.WithField("peerID", peer)

// Computes the custody columns for each peer
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
if err != nil {
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
log.WithError(err).Debug("Failed to convert peer ID to node ID")
continue
}

custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)

dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
log.WithError(err).Debug("Failed to get peer DAS info")
continue
}

for column := range dasInfo.CustodyColumns {
@@ -1046,11 +1048,13 @@ func computeIndicesByRootByPeer(
// Compute the head slot for each peer
peerChainState, err := p2p.Peers().ChainState(peer)
if err != nil {
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
log.WithError(err).Debug("Failed to get peer chain state")
continue
}

if peerChainState == nil {
return nil, errors.Errorf("chain state is nil for peer %s", peer)
log.Debug("Peer chain state is nil")
continue
}

// Our view of the head slot of a peer is not updated in real time.
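The hunk above switches computeIndicesByRootByPeer from failing the whole request when one peer is malformed to logging and skipping that peer, so a single bad entry no longer aborts column fetching. The filter-loop pattern, reduced to its core (the peer type and resolution checks are placeholders):

package main

import (
	"errors"
	"fmt"
	"log"
)

type peerInfo struct {
	id       string
	headSlot uint64
}

// usablePeers keeps every peer whose metadata resolves, instead of
// returning an error on the first failure.
func usablePeers(ids []string, resolve func(string) (peerInfo, error)) []peerInfo {
	out := make([]peerInfo, 0, len(ids))
	for _, id := range ids {
		info, err := resolve(id)
		if err != nil {
			log.Printf("skipping peer %s: %v", id, err)
			continue
		}
		out = append(out, info)
	}
	return out
}

func main() {
	resolve := func(id string) (peerInfo, error) {
		if id == "bad" {
			return peerInfo{}, errors.New("no chain state")
		}
		return peerInfo{id: id, headSlot: 8}, nil
	}
	fmt.Println(len(usablePeers([]string{"a", "bad", "b"}, resolve))) // 2
}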
@@ -23,7 +23,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -144,7 +143,7 @@ func TestFetchDataColumnSidecars(t *testing.T) {
HeadSlot: 8,
})

p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&pb.MetaDataV2{
p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&ethpb.MetaDataV2{
CustodyGroupCount: 128,
}))

@@ -117,6 +117,7 @@ func TestService_decodePubsubMessage(t *testing.T) {

func TestExtractDataType(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()

type args struct {
@@ -304,6 +305,9 @@ func TestExtractDataType(t *testing.T) {
}

func TestExtractDataTypeFromTypeMapInvalid(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}}
_, err := extractDataTypeFromTypeMap(types.BlockMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)

@@ -2,7 +2,6 @@ package sync

import (
"bytes"
"errors"
"io"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -12,6 +11,7 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
multiplex "github.com/libp2p/go-mplex"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -38,7 +38,7 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
b := make([]byte, 1)
_, err := stream.Read(b)
if err != nil {
return 0, "", err
return 0, "", errors.Wrap(err, "stream read")
}

if b[0] == responseCodeSuccess {
@@ -52,7 +52,7 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
SetStreamReadDeadline(stream, params.BeaconConfig().RespTimeoutDuration())
msg := &types.ErrorMessage{}
if err := encoding.DecodeWithMaxLength(stream, msg); err != nil {
return 0, "", err
return 0, "", errors.Wrap(err, "decode error message")
}

return b[0], string(*msg), nil
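The two one-line changes above wrap low-level read and decode failures with context before returning them; the diff uses github.com/pkg/errors.Wrap. A short illustration of equivalent wrapping with the standard library's %w, showing that the original sentinel error remains matchable:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

func readStatus(r io.Reader) (byte, error) {
	b := make([]byte, 1)
	if _, err := r.Read(b); err != nil {
		return 0, fmt.Errorf("stream read: %w", err) // caller still sees io.EOF via errors.Is
	}
	return b[0], nil
}

func main() {
	_, err := readStatus(strings.NewReader(""))
	fmt.Println(err, errors.Is(err, io.EOF)) // stream read: EOF true
}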
@@ -9,23 +9,28 @@ import (
"github.com/pkg/errors"
)

// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
<-s.initialSyncComplete
// p2pHandlerControlLoop runs in a continuous loop to ensure that:
// - We are subscribed to the correct gossipsub topics (for the current and upcoming epoch).
// - We have registered the correct RPC stream handlers (for the current and upcoming epoch).
// - We have cleaned up gossipsub topics and RPC stream handlers that are no longer needed.
func (s *Service) p2pHandlerControlLoop() {
// At startup, launch registration and peer discovery loops, and register rpc stream handlers.
startEntry := params.GetNetworkScheduleEntry(s.cfg.clock.CurrentEpoch())
s.registerSubscribers(startEntry)

slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
// In the event of a node restart, we will still end up subscribing to the correct
// topics during/after the fork epoch. This routine is to ensure correct
// subscriptions for nodes running before a fork epoch.
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if err := s.registerForUpcomingFork(currEpoch); err != nil {
case <-slotTicker.C():
current := s.cfg.clock.CurrentEpoch()
if err := s.ensureRegistrationsForEpoch(current); err != nil {
log.WithError(err).Error("Unable to check for fork in the next epoch")
continue
}
if err := s.deregisterFromPastFork(currEpoch); err != nil {
if err := s.ensureDeregistrationForEpoch(current); err != nil {
log.WithError(err).Error("Unable to check for fork in the previous epoch")
continue
}
@@ -37,102 +42,90 @@ func (s *Service) forkWatcher() {
}
}

// registerForUpcomingFork registers appropriate gossip and RPC topic if there is a fork in the next epoch.
func (s *Service) registerForUpcomingFork(currentEpoch primitives.Epoch) error {
nextEntry := params.GetNetworkScheduleEntry(currentEpoch + 1)
// Check if there is a fork in the next epoch.
if nextEntry.ForkDigest == s.registeredNetworkEntry.ForkDigest {
return nil
}
// ensureRegistrationsForEpoch ensures that gossip topic and RPC stream handler
// registrations are in place for the current and subsequent epoch.
func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
current := params.GetNetworkScheduleEntry(epoch)
s.registerSubscribers(current)

if s.subHandler.digestExists(nextEntry.ForkDigest) {
return nil
}

// Register the subscribers (gossipsub) for the next epoch.
s.registerSubscribers(nextEntry.Epoch, nextEntry.ForkDigest)

// Get the handlers for the current and next fork.
currentHandler, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
currentHandler, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
if !s.digestActionDone(current.ForkDigest, registerRpcOnce) {
for topic, handler := range currentHandler {
s.registerRPC(topic, handler)
}
}

nextHandler, err := s.rpcHandlerByTopicFromEpoch(nextEntry.Epoch)
next := params.GetNetworkScheduleEntry(epoch + 1)
if current.Epoch == next.Epoch {
return nil // no fork in the next epoch
}
s.registerSubscribers(next)

if s.digestActionDone(next.ForkDigest, registerRpcOnce) {
return nil
}

nextHandler, err := s.rpcHandlerByTopicFromFork(next.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}

// Compute newly added topics.
newHandlersByTopic := addedRPCHandlerByTopic(currentHandler, nextHandler)

// Register the new RPC handlers.
// We deregister the old topics later, at least one epoch after the fork.
for topic, handler := range newHandlersByTopic {
s.registerRPC(topic, handler)
}

s.registeredNetworkEntry = nextEntry
return nil
}
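ensureRegistrationsForEpoch is written so it is safe to call every slot: registration is idempotent, guarded by per-digest once-flags, and the next epoch's schedule entry is only processed when it actually differs from the current one. A stripped-down sketch of that idempotent control loop follows; the schedule and registrar are stand-ins, not Prysm's types.

package main

import "fmt"

type entry struct {
	epoch  uint64
	digest [4]byte
}

type registrar struct {
	done map[[4]byte]bool // per-digest "already registered" flags
}

func (r *registrar) register(e entry) {
	if r.done[e.digest] {
		return // idempotent: calling this every slot is harmless
	}
	r.done[e.digest] = true
	fmt.Printf("registered handlers for epoch %d\n", e.epoch)
}

// ensureForEpoch registers the current entry and, if a fork begins next epoch, that one too.
func ensureForEpoch(r *registrar, schedule func(uint64) entry, epoch uint64) {
	current := schedule(epoch)
	r.register(current)
	next := schedule(epoch + 1)
	if next.epoch != current.epoch {
		r.register(next)
	}
}

func main() {
	schedule := func(e uint64) entry {
		if e >= 10 {
			return entry{epoch: 10, digest: [4]byte{1}}
		}
		return entry{epoch: 0, digest: [4]byte{0}}
	}
	r := &registrar{done: map[[4]byte]bool{}}
	for epoch := uint64(8); epoch <= 11; epoch++ {
		ensureForEpoch(r, schedule, epoch) // safe to repeat every slot/epoch
	}
}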
|
||||
|
||||
// deregisterFromPastFork deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
|
||||
func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
|
||||
// Get the fork.
|
||||
currentFork, err := params.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "genesis validators root")
|
||||
}
|
||||
// ensureDeregistrationForEpoch deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
|
||||
func (s *Service) ensureDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
|
||||
current := params.GetNetworkScheduleEntry(currentEpoch)
|
||||
|
||||
// If we are still in our genesis fork version then exit early.
|
||||
if currentFork.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
if current.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
return nil
|
||||
}
|
||||
if currentEpoch < current.Epoch+1 {
|
||||
return nil // wait until we are 1 epoch into the fork
|
||||
}
|
||||
|
||||
// Get the epoch after the fork epoch.
|
||||
afterForkEpoch := currentFork.Epoch + 1
|
||||
previous := params.GetNetworkScheduleEntry(current.Epoch - 1)
|
||||
// Remove stream handlers for all topics that are in the set of
|
||||
// currentTopics-previousTopics
|
||||
if !s.digestActionDone(previous.ForkDigest, unregisterRpcOnce) {
|
||||
previousTopics, err := s.rpcHandlerByTopicFromFork(previous.VersionEnum)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
|
||||
}
|
||||
currentTopics, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RPC handler by topic from fork epoch")
|
||||
}
|
||||
topicsToRemove := removedRPCTopics(previousTopics, currentTopics)
|
||||
for topic := range topicsToRemove {
|
||||
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullTopic))
|
||||
log.WithField("topic", fullTopic).Debug("Removed RPC handler")
|
||||
}
|
||||
}
|
||||
|
||||
// Start de-registering if the current epoch is after the fork epoch.
|
||||
if currentEpoch != afterForkEpoch {
|
||||
// Unsubscribe from all gossip topics with the previous fork digest.
|
||||
if s.digestActionDone(previous.ForkDigest, unregisterGossipOnce) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Look at the previous fork's digest.
|
||||
beforeForkEpoch := currentFork.Epoch - 1
|
||||
|
||||
beforeForkDigest := params.ForkDigest(beforeForkEpoch)
|
||||
|
||||
// Exit early if there are no topics with that particular digest.
|
||||
if !s.subHandler.digestExists(beforeForkDigest) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the RPC handlers that are no longer needed.
|
||||
beforeForkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(beforeForkEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
|
||||
}
|
||||
|
||||
forkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(currentFork.Epoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "RPC handler by topic from fork epoch")
|
||||
}
|
||||
|
||||
topicsToRemove := removedRPCTopics(beforeForkHandlerByTopic, forkHandlerByTopic)
|
||||
for topic := range topicsToRemove {
|
||||
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullTopic))
|
||||
log.WithField("topic", fullTopic).Debug("Removed RPC handler")
|
||||
}
|
||||
|
||||
// Run through all our current active topics and see
|
||||
// if there are any subscriptions to be removed.
|
||||
for _, t := range s.subHandler.allTopics() {
|
||||
retDigest, err := p2p.ExtractGossipDigest(t)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve digest")
|
||||
continue
|
||||
}
|
||||
if retDigest == beforeForkDigest {
|
||||
if retDigest == previous.ForkDigest {
|
||||
s.unSubscribeFromTopic(t)
|
||||
}
|
||||
}
|
||||
|
||||
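The loop that actually invokes these two functions is not shown in this diff. As a rough sketch of the assumed wiring, each time the clock advances the service would first top up registrations for the current and next fork, then retire the previous fork's handlers and subscriptions; forkWatcherSketch and the slotTicker channel are hypothetical names.

func (s *Service) forkWatcherSketch(ctx context.Context, slotTicker <-chan primitives.Slot) {
    for {
        select {
        case <-ctx.Done():
            return
        case slot := <-slotTicker:
            epoch := slots.ToEpoch(slot)
            if err := s.ensureRegistrationsForEpoch(epoch); err != nil {
                log.WithError(err).Error("Could not ensure registrations for epoch")
            }
            if err := s.ensureDeregistrationForEpoch(epoch); err != nil {
                log.WithError(err).Error("Could not ensure deregistrations for epoch")
            }
        }
    }
}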
@@ -50,12 +50,36 @@ func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
|
||||
return r
|
||||
}
|
||||
|
||||
func TestRegisterSubscriptions_Idempotent(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
fulu := params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.BeaconConfig().FuluForkEpoch = fulu
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
current := fulu - 1
|
||||
s := testForkWatcherService(t, current)
|
||||
next := params.GetNetworkScheduleEntry(fulu)
|
||||
wg := attachSpawner(s)
|
||||
require.Equal(t, true, s.registerSubscribers(next))
|
||||
done := make(chan struct{})
|
||||
go func() { wg.Wait(); close(done) }()
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for subscriptions to be registered")
|
||||
case <-done:
|
||||
}
|
||||
// The goal of this callback is just to assert that the spawner is never invoked a second time for the same digest.
|
||||
s.subscriptionSpawner = func(func()) { t.Error("registration routines spawned twice for the same digest") }
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(fulu))
|
||||
}
|
||||
|
||||
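attachSpawner is used by this test and the ones below but is not included in the diff. A plausible shape, for orientation only: it swaps the service's subscriptionSpawner for one that tracks every spawned registration routine in a WaitGroup so the test can wait for them to finish; the Sketch suffix marks it as hypothetical.

func attachSpawnerSketch(s *Service) *sync.WaitGroup {
    wg := &sync.WaitGroup{} // stdlib sync, imported as in once.go further down
    s.subscriptionSpawner = func(fn func()) {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fn()
        }()
    }
    return wg
}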
func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 1096*2
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
tests := []struct {
|
||||
@@ -171,7 +195,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
current := tt.epochAtRegistration(tt.forkEpoch)
|
||||
s := testForkWatcherService(t, current)
|
||||
wg := attachSpawner(s)
|
||||
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
tt.checkRegistration(t, s)
|
||||
|
||||
@@ -193,10 +217,13 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
// Move the clock to just before the next fork epoch and ensure deregistration is correct
|
||||
wg = attachSpawner(s)
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
|
||||
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
|
||||
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
// deregister as if it is the epoch after the next fork epoch
|
||||
require.NoError(t, s.deregisterFromPastFork(tt.nextForkEpoch+1))
|
||||
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch+1))
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
|
||||
})
|
||||
|
||||
@@ -384,7 +384,7 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
|
||||
}
|
||||
|
||||
// Compute the columns to request.
|
||||
custodyGroupCount, err := f.p2p.CustodyGroupCount()
|
||||
custodyGroupCount, err := f.p2p.CustodyGroupCount(ctx)
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
mathutil "github.com/OffchainLabs/prysm/v6/math"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -131,8 +130,8 @@ func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
|
||||
// Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
|
||||
limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
|
||||
// Limit cannot be less than the minimum number of peers required by the sync mechanism.
|
||||
limit = mathutil.Max(limit, uint64(required))
|
||||
limit = max(limit, uint64(required))
|
||||
// Limit cannot be higher than number of peers available (safe-guard).
|
||||
limit = mathutil.Min(limit, uint64(len(peers)))
|
||||
limit = min(limit, uint64(len(peers)))
|
||||
return peers[:limit]
|
||||
}
|
||||
|
||||
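The trimPeers change above drops the project's mathutil helpers in favor of Go's built-in generic min and max (available since Go 1.21), which work directly on any ordered type. A small standalone illustration of the same clamping pattern:

package main

import "fmt"

func main() {
    limit := uint64(4)     // candidate limit from the percentage calculation
    required := uint64(5)  // minimum peers required by the sync mechanism
    peerCount := uint64(3) // peers actually available

    limit = max(limit, required)  // never below the required minimum
    limit = min(limit, peerCount) // never above what is available (safeguard)
    fmt.Println(limit)            // prints 3
}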
@@ -1017,13 +1017,13 @@ func TestBlobRangeForBlocks(t *testing.T) {
|
||||
for i := range blks {
|
||||
sbbs[i] = blks[i]
|
||||
}
|
||||
retentionStart := primitives.Slot(5)
|
||||
retentionStart := blks[len(blks)/2].Block().Slot()
|
||||
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
|
||||
require.NoError(t, err)
|
||||
bounds := countCommitments(bwb, retentionStart).blobRange(nil)
|
||||
require.Equal(t, retentionStart, bounds.low)
|
||||
higher := primitives.Slot(len(blks) + 1)
|
||||
bounds = countCommitments(bwb, higher).blobRange(nil)
|
||||
maxBlkSlot := blks[len(blks)-1].Block().Slot()
|
||||
bounds = countCommitments(bwb, maxBlkSlot+1).blobRange(nil)
|
||||
var nilBounds *blobRange
|
||||
require.Equal(t, nilBounds, bounds)
|
||||
|
||||
@@ -1054,17 +1054,17 @@ func TestBlobRequest(t *testing.T) {
|
||||
}
|
||||
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
|
||||
require.NoError(t, err)
|
||||
maxBlkSlot := primitives.Slot(len(blks) - 1)
|
||||
|
||||
tooHigh := primitives.Slot(len(blks) + 1)
|
||||
maxBlkSlot := blks[len(blks)-1].Block().Slot()
|
||||
tooHigh := maxBlkSlot + 1
|
||||
req = countCommitments(bwb, tooHigh).blobRange(nil).Request()
|
||||
require.Equal(t, nilReq, req)
|
||||
|
||||
req = countCommitments(bwb, maxBlkSlot).blobRange(nil).Request()
|
||||
require.Equal(t, uint64(1), req.Count)
|
||||
require.Equal(t, maxBlkSlot, req.StartSlot)
|
||||
require.Equal(t, uint64(1), req.Count)
|
||||
|
||||
halfway := primitives.Slot(5)
|
||||
halfway := blks[len(blks)/2].Block().Slot()
|
||||
req = countCommitments(bwb, halfway).blobRange(nil).Request()
|
||||
require.Equal(t, halfway, req.StartSlot)
|
||||
// adding 1 to include the halfway slot itself
|
||||
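These assertions pin down the arithmetic that blobRange.Request presumably performs (the method body itself is not in this diff): the request starts at the range's low slot and covers every slot up to and including the high slot, hence the +1 in the count. Roughly:

// Inferred from the tests above; the real Request method may differ in details.
func (r *blobRange) requestSketch() *ethpb.BlobSidecarsByRangeRequest {
    if r == nil {
        return nil
    }
    return &ethpb.BlobSidecarsByRangeRequest{
        StartSlot: r.low,
        Count:     uint64(r.high-r.low) + 1, // +1 so the high slot itself is included
    }
}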
@@ -1103,6 +1103,12 @@ func TestCountCommitments(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCommitmentCountList(t *testing.T) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
ds := util.SlotAtEpoch(t, de)
|
||||
denebRel := func(s primitives.Slot) primitives.Slot {
|
||||
return ds + s
|
||||
}
|
||||
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
|
||||
cases := []struct {
|
||||
name string
|
||||
cc commitmentCountList
|
||||
@@ -1119,20 +1125,20 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
{
|
||||
name: "nil bss, single slot",
|
||||
cc: []commitmentCount{
|
||||
{slot: 11235, count: 1},
|
||||
{slot: denebRel(11235), count: 1},
|
||||
},
|
||||
expected: &blobRange{low: 11235, high: 11235},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 1},
|
||||
expected: &blobRange{low: denebRel(11235), high: denebRel(11235)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 1},
|
||||
},
|
||||
{
|
||||
name: "nil bss, sparse slots",
|
||||
cc: []commitmentCount{
|
||||
{slot: 11235, count: 1},
|
||||
{slot: 11240, count: params.BeaconConfig().MaxBlobsPerBlock(0)},
|
||||
{slot: 11250, count: 3},
|
||||
{slot: denebRel(11235), count: 1},
|
||||
{slot: denebRel(11240), count: maxBlobs},
|
||||
{slot: denebRel(11250), count: 3},
|
||||
},
|
||||
expected: &blobRange{low: 11235, high: 11250},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 16},
|
||||
expected: &blobRange{low: denebRel(11235), high: denebRel(11250)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 16},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable in middle, some avail low, none high",
|
||||
@@ -1141,15 +1147,15 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: 15, count: 3},
|
||||
{slot: denebRel(0), count: 3, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: denebRel(5), count: maxBlobs, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: denebRel(15), count: 3},
|
||||
},
|
||||
expected: &blobRange{low: 0, high: 15},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 0, Count: 16},
|
||||
expected: &blobRange{low: denebRel(0), high: denebRel(15)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(0), Count: 16},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low",
|
||||
@@ -1158,15 +1164,15 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3},
|
||||
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
|
||||
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: denebRel(5), count: 3},
|
||||
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 5},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 1},
|
||||
expected: &blobRange{low: denebRel(5), high: denebRel(5)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 1},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low, adjacent range in middle",
|
||||
@@ -1175,16 +1181,16 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3},
|
||||
{slot: 6, count: 3},
|
||||
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
|
||||
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: denebRel(5), count: 3},
|
||||
{slot: denebRel(6), count: 3},
|
||||
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 6},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 2},
|
||||
expected: &blobRange{low: denebRel(5), high: denebRel(6)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 2},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low, range in middle",
|
||||
@@ -1194,16 +1200,16 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
bytesutil.ToBytes32([]byte("1")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: 10, count: 3},
|
||||
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
|
||||
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: denebRel(5), count: 3, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: denebRel(10), count: 3},
|
||||
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 10},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 6},
|
||||
expected: &blobRange{low: denebRel(5), high: denebRel(10)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 6},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
@@ -1218,8 +1224,8 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
require.IsNil(t, br.Request())
|
||||
} else {
|
||||
req := br.Request()
|
||||
require.DeepEqual(t, req.StartSlot, c.request.StartSlot)
|
||||
require.DeepEqual(t, req.Count, c.request.Count)
|
||||
require.Equal(t, req.StartSlot, c.request.StartSlot)
|
||||
require.Equal(t, req.Count, c.request.Count)
|
||||
}
|
||||
})
|
||||
}
|
||||
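Several of the updated tests anchor their slots at the Deneb fork epoch via util.SlotAtEpoch, which is not shown here. Presumably it is a thin test helper around the epoch-start calculation, something like the sketch below; anchoring at Deneb matters because MaxBlobsPerBlock and the blob-storage mocks are fork-aware, as the new epoch argument to NewMockBlobStorageSummarizer suggests.

// Hypothetical helper shape; the real testing/util.SlotAtEpoch may differ.
func SlotAtEpochSketch(t *testing.T, epoch primitives.Epoch) primitives.Slot {
    slot, err := slots.EpochStart(epoch)
    require.NoError(t, err)
    return slot
}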
@@ -1299,7 +1305,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
r1: {0, 1},
|
||||
r7: {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, params.BeaconConfig().DenebForkEpoch, onDisk)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(bwb[i1].Blobs))
|
||||
|
||||
@@ -413,7 +413,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay ti
|
||||
}
|
||||
|
||||
// Compute the indices we need to custody.
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
@@ -439,6 +439,7 @@ func TestService_Synced(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMissingBlobRequest(t *testing.T) {
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
cases := []struct {
|
||||
name string
|
||||
setup func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage)
|
||||
@@ -476,7 +477,7 @@ func TestMissingBlobRequest(t *testing.T) {
|
||||
{
|
||||
name: "2 commitments, 1 missing",
|
||||
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
|
||||
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 1))
|
||||
return bk, fs
|
||||
@@ -486,7 +487,7 @@ func TestMissingBlobRequest(t *testing.T) {
|
||||
{
|
||||
name: "2 commitments, 0 missing",
|
||||
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
|
||||
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 0, 1))
|
||||
return bk, fs
|
||||
@@ -629,7 +630,7 @@ func TestFetchOriginSidecars(t *testing.T) {
|
||||
|
||||
// Compute the columns to request.
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
custodyGroupCount, err := p2p.CustodyGroupCount()
|
||||
custodyGroupCount, err := p2p.CustodyGroupCount(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
|
||||
40
beacon-chain/sync/once.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package sync
|
||||
|
||||
import "sync"
|
||||
|
||||
// oncePerDigest represents an action that should only be performed once per fork digest.
|
||||
type oncePerDigest uint8
|
||||
|
||||
const (
|
||||
doneZero oncePerDigest = 0
|
||||
registerGossipOnce oncePerDigest = 1 << 0
|
||||
unregisterGossipOnce oncePerDigest = 1 << 1
|
||||
registerRpcOnce oncePerDigest = 1 << 2
|
||||
unregisterRpcOnce oncePerDigest = 1 << 3
|
||||
)
|
||||
|
||||
// perDigestSet keeps track of which oncePerDigest actions
|
||||
// have been performed for each fork digest.
|
||||
type perDigestSet struct {
|
||||
sync.Mutex
|
||||
history map[[4]byte]oncePerDigest
|
||||
}
|
||||
|
||||
// digestActionDone marks the action as done for the given digest, returning true if it was already done.
|
||||
func (s *Service) digestActionDone(digest [4]byte, action oncePerDigest) bool {
|
||||
s.digestActions.Lock()
|
||||
defer s.digestActions.Unlock()
|
||||
// Lazily initialize the history map; the embedded mutex's zero value needs no initialization.
|
||||
if s.digestActions.history == nil {
|
||||
s.digestActions.history = make(map[[4]byte]oncePerDigest)
|
||||
}
|
||||
|
||||
prev := s.digestActions.history[digest]
|
||||
// Return true if the bit was already set
|
||||
if prev&action != 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
s.digestActions.history[digest] = prev | action
|
||||
return false
|
||||
}
|
||||
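digestActionDone is the guard the fork-transition code above uses to keep gossip and RPC (de)registration idempotent per fork digest. A typical call site, mirroring ensureRegistrationsForEpoch (entry and handlersByTopic are placeholder names here):

// Register RPC handlers for a digest at most once, no matter how many times
// the epoch ticker fires within the same fork.
if !s.digestActionDone(entry.ForkDigest, registerRpcOnce) {
    for topic, handler := range handlersByTopic {
        s.registerRPC(topic, handler)
    }
}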
40
beacon-chain/sync/once_test.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDigestActionDone(t *testing.T) {
|
||||
digests := [][4]byte{
|
||||
{0, 0, 0, 0},
|
||||
{1, 2, 3, 4},
|
||||
{4, 3, 2, 1},
|
||||
}
|
||||
actions := []oncePerDigest{
|
||||
registerGossipOnce,
|
||||
unregisterGossipOnce,
|
||||
registerRpcOnce,
|
||||
unregisterRpcOnce,
|
||||
}
|
||||
testCombos := func(d [][4]byte, a []oncePerDigest) {
|
||||
s := &Service{}
|
||||
for _, digest := range d {
|
||||
for _, action := range a {
|
||||
t.Run(fmt.Sprintf("digest=%#x/action=%d", digest, action), func(t *testing.T) {
|
||||
if s.digestActionDone(digest, action) {
|
||||
t.Fatal("expected first call to return false")
|
||||
}
|
||||
if !s.digestActionDone(digest, action) {
|
||||
t.Fatal("expected second call to return true")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
testCombos(digests, actions)
|
||||
slices.Reverse(digests)
|
||||
slices.Reverse(actions)
|
||||
testCombos(digests, actions)
|
||||
}
|
||||
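Because the actions are distinct bits, each digest's history is a small bitmask that accumulates with OR and is queried with AND. A compact illustration that could sit alongside the test above (the example function name is hypothetical):

func ExampleOncePerDigestBits() {
    var history oncePerDigest = doneZero // 0b0000: nothing recorded yet
    history |= registerGossipOnce        // 0b0001
    history |= registerRpcOnce           // 0b0101
    fmt.Println(history&registerRpcOnce != 0)   // RPC registration already recorded
    fmt.Println(history&unregisterRpcOnce != 0) // RPC deregistration not yet recorded
    // Output:
    // true
    // false
}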
Some files were not shown because too many files have changed in this diff.