Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: improve-ev...v6.1.2 (15 commits)
| SHA1 |
|---|
| 38955fd08c |
| 71f05b597f |
| 0d742c6f88 |
| 06b5409ff0 |
| 9805e90d73 |
| 537f3cb863 |
| b45e87abd6 |
| 4c4b12cca7 |
| aabded250f |
| 4f9e56fc70 |
| 2a86132994 |
| 74c47e25a9 |
| 28eb1a4c3c |
| 1f89394727 |
| bf1095c782 |
@@ -59,6 +59,7 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
+ "//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
+ "github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"

@@ -170,8 +171,11 @@ func TestClient_RegisterValidator(t *testing.T) {
func TestClient_GetHeader(t *testing.T) {
ctx := t.Context()
- expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
- var slot primitives.Slot = 23
+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+ es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
+ expectedPath := "/eth/v1/builder/header/%d/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
+ expectedPath = fmt.Sprintf(expectedPath, ds)
+ var slot primitives.Slot = ds
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {

@@ -533,7 +537,7 @@ func TestClient_GetHeader(t *testing.T) {
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseElectra{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
- pro, err := epr.ToProto(100)
+ pro, err := epr.ToProto(es)
require.NoError(t, err)
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)
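The hunks above swap hard-coded slots for fork-relative ones computed with util.SlotAtEpoch. That helper is not shown in this comparison; a minimal sketch of what it presumably does (return the first slot of an epoch, failing the test on overflow), with the name and signature assumed from the call sites:

```go
package util

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// SlotAtEpoch returns the first slot of the given epoch, failing the test if
// the conversion overflows. Sketch only: the real helper in testing/util may differ.
func SlotAtEpoch(t *testing.T, epoch primitives.Epoch) primitives.Slot {
	s, err := slots.EpochStart(epoch)
	if err != nil {
		t.Fatal(err)
	}
	return s
}
```

Deriving slots from the configured fork epochs keeps these tests valid even if the fork schedule in the test config changes.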
@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
- "github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"

@@ -108,7 +107,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
- dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
+ dep := max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")

@@ -135,7 +134,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
- Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
+ Depth: max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
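Several hunks in this comparison make the same mechanical change: calls to the project's math helpers (math.Min, math.Max, mathutil.Max) become the generic min and max built-ins that Go has provided since 1.21, and the now-unused math import is dropped. A small, self-contained illustration of the pattern; the variable names are invented:

```go
package main

import "fmt"

func main() {
	headDepth, newHeadDepth := uint64(3), uint64(7)

	// Before: a project helper such as math.Max(headDepth, newHeadDepth).
	// After: the Go 1.21+ built-in generic max, with no extra import needed.
	depth := max(headDepth, newHeadDepth)

	// min works the same way and accepts any ordered type.
	delay := min(1500, 1000)

	fmt.Println(depth, delay) // 7 1000
}
```

Because the built-ins are generic over ordered types, the uint64 operands no longer need a project-specific helper.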
@@ -712,7 +712,7 @@ func (s *Service) areDataColumnsAvailable(
nodeID := s.cfg.P2P.NodeID()

// Get the custody group sampling size for the node.
- custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
+ custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}
@@ -2413,6 +2413,8 @@ func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
}

func TestMissingBlobIndices(t *testing.T) {
+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+ maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
expected [][]byte

@@ -2426,23 +2428,23 @@ func TestMissingBlobIndices(t *testing.T) {
},
{
name: "expected exceeds max",
- expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
+ expected: fakeCommitments(maxBlobs + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
- expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
+ expected: fakeCommitments(maxBlobs),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
- expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
+ expected: fakeCommitments(maxBlobs),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
- expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
+ expected: fakeCommitments(maxBlobs),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},

@@ -2475,8 +2477,8 @@ func TestMissingBlobIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
- require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
- missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
+ require.NoError(t, bm.CreateFakeIndices(c.root, ds, c.present...))
+ missing, err := missingBlobIndices(bs, c.root, c.expected, ds)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return

@@ -2904,22 +2906,21 @@ type testIsAvailableParams struct {
columnsToSave []uint64
}

- func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
+ func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(t.Context())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

- options := append(params.options, WithDataColumnStorage(dataColumnStorage))
+ options := append(p.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
+ fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

- genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
- err := service.saveGenesisData(ctx, genesisState)
- require.NoError(t, err)
+ genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32, util.WithElectraStateSlot(fs))
+ require.NoError(t, service.saveGenesisData(ctx, genesisState))

conf := util.DefaultBlockGenConfig()
- conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
+ conf.NumBlobKzgCommitments = p.blobKzgCommitmentsCount

- signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
+ signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, fs+1)
require.NoError(t, err)

block := signedBeaconBlock.Block

@@ -2929,8 +2930,8 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
root, err := block.HashTreeRoot()
require.NoError(t, err)

- dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
- for _, i := range params.columnsToSave {
+ dataColumnsParams := make([]util.DataColumnParam, 0, len(p.columnsToSave))
+ for _, i := range p.columnsToSave {
dataColumnParam := util.DataColumnParam{
Index: i,
Slot: block.Slot,

@@ -2954,8 +2955,12 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
}

func TestIsDataAvailable(t *testing.T) {
+ params.SetupTestConfigCleanup(t)
+ cfg := params.BeaconConfig()
+ cfg.AltairForkEpoch, cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch, cfg.ElectraForkEpoch, cfg.FuluForkEpoch = 0, 0, 0, 0, 0, 0
+ params.OverrideBeaconConfig(cfg)
t.Run("Fulu - out of retention window", func(t *testing.T) {
- params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
+ params := testIsAvailableParams{}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)

roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)

@@ -2972,7 +2977,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})

t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
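The test updates above also replace MaxBlobsPerBlock(0) with MaxBlobsPerBlock(ds) or MaxBlobsPerBlock(es): the per-block blob limit is fork-dependent, so querying it at slot 0 does not reflect the limit in force at the slots the tests actually exercise. A rough sketch of a fork-scheduled lookup in the same spirit; the type names and numbers below are invented and do not come from Prysm's config:

```go
package main

import "fmt"

// blobScheduleEntry mirrors the idea behind params.BlobScheduleEntry: starting
// at some epoch, a new per-block blob limit applies. Names are illustrative.
type blobScheduleEntry struct {
	startEpoch       uint64
	maxBlobsPerBlock int
}

// maxBlobsPerBlock returns the limit in force at the given epoch, which is why
// the tests above now pass a fork-specific slot instead of a literal 0.
func maxBlobsPerBlock(schedule []blobScheduleEntry, epoch uint64) int {
	limit := 0
	for _, e := range schedule {
		if epoch >= e.startEpoch {
			limit = e.maxBlobsPerBlock
		}
	}
	return limit
}

func main() {
	schedule := []blobScheduleEntry{
		{startEpoch: 100, maxBlobsPerBlock: 6}, // e.g. a Deneb-era limit
		{startEpoch: 200, maxBlobsPerBlock: 9}, // e.g. an Electra-era limit
	}
	fmt.Println(maxBlobsPerBlock(schedule, 0))   // 0: before the schedule starts
	fmt.Println(maxBlobsPerBlock(schedule, 150)) // 6
	fmt.Println(maxBlobsPerBlock(schedule, 250)) // 9
}
```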
@@ -562,8 +562,9 @@ func TestNotifyIndex(t *testing.T) {
var root [32]byte
copy(root[:], "exampleRoot")

+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Test notifying a new index
- bn.notifyIndex(root, 1, 1)
+ bn.notifyIndex(root, 1, ds)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}

@@ -580,7 +581,7 @@
}

// Test notifying a new index again
- bn.notifyIndex(root, 2, 1)
+ bn.notifyIndex(root, 2, ds)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}
@@ -106,14 +106,14 @@ type mockCustodyManager struct {
custodyGroupCount uint64
}

- func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
+ func (dch *mockCustodyManager) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()

return dch.earliestAvailableSlot, nil
}

- func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
+ func (dch *mockCustodyManager) CustodyGroupCount(context.Context) (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
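The mock above follows an interface change in which EarliestAvailableSlot and CustodyGroupCount now accept a context.Context, matching the call site earlier in this comparison that passes ctx. A minimal sketch of threading a context through an interface and a test double; the interface and type names are invented for illustration:

```go
package main

import (
	"context"
	"fmt"
)

// custodyProvider is a hypothetical interface; the real Prysm interface and
// method set may differ. The point is that the method now takes a context so
// callers can cancel or bound the underlying work.
type custodyProvider interface {
	CustodyGroupCount(ctx context.Context) (uint64, error)
}

type fixedProvider struct{ count uint64 }

func (f fixedProvider) CustodyGroupCount(ctx context.Context) (uint64, error) {
	// A mock can usually ignore the context, but respecting cancellation keeps
	// tests honest about how production callers behave.
	if err := ctx.Err(); err != nil {
		return 0, err
	}
	return f.count, nil
}

func main() {
	var p custodyProvider = fixedProvider{count: 8}
	n, err := p.CustodyGroupCount(context.Background())
	fmt.Println(n, err) // 8 <nil>
}
```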
beacon-chain/cache/committee.go (vendored, 3 changes)
@@ -5,7 +5,6 @@ package cache
import (
"context"
"errors"
- "math"
"sync"
"time"

@@ -272,7 +271,7 @@ func (c *CommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) err
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
- delay = math.Min(delay, maxDelay)
+ delay = min(delay, maxDelay)
}
return nil
}

@@ -52,7 +52,7 @@ func create(leaves [][32]byte, depth uint64) MerkleTreeNode {
if depth == 0 {
return &LeafNode{hash: leaves[0]}
}
- split := math.Min(math.PowerOf2(depth-1), length)
+ split := min(math.PowerOf2(depth-1), length)
left := create(leaves[0:split], depth-1)
right := create(leaves[split:], depth-1)
return &InnerNode{left: left, right: right}
beacon-chain/cache/skip_slot_cache.go (vendored, 3 changes)
@@ -2,7 +2,6 @@ package cache

import (
"context"
- "math"
"sync"
"time"

@@ -90,7 +89,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
- delay = math.Min(delay, maxDelay)
+ delay = min(delay, maxDelay)
}
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))
@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
- "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -209,7 +208,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}

cfg := params.BeaconConfig()
- modulo := math.Max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
+ modulo := max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
hashedSig := hash.Hash(sig)
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
}

@@ -39,7 +39,6 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
- "//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
- "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)

@@ -34,7 +33,7 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
if err != nil {
return nil, err
}
- validator.EffectiveBalance = math.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
+ validator.EffectiveBalance = min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
if validator.EffectiveBalance ==
params.BeaconConfig().MaxEffectiveBalance {
validator.ActivationEligibilityEpoch = 0

@@ -233,7 +233,7 @@ func ProcessSlashings(st state.BeaconState) error {
// a callback is used here to apply the following actions to all validators
// below equally.
increment := params.BeaconConfig().EffectiveBalanceIncrement
- minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)
+ minSlashing := min(totalSlashing*slashingMultiplier, totalBalance)

// Modified in Electra:EIP7251
var penaltyPerEffectiveBalanceIncrement uint64
@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
- "github.com/OffchainLabs/prysm/v6/math"
)

// ProcessSlashingsPrecompute processes the slashed validators during epoch processing.

@@ -21,7 +20,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
totalSlashing += slashing
}

- minSlashing := math.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
+ minSlashing := min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
epochToWithdraw := currentEpoch + exitLength/2

var hasSlashing bool

@@ -79,7 +79,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
}

// Spec defines `EffectiveBalanceIncrement` as min to avoid divisions by zero.
- total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
+ total = max(params.BeaconConfig().EffectiveBalanceIncrement, total)
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
return 0, err
}
@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
- "github.com/OffchainLabs/prysm/v6/math"
v1alpha1 "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
)

@@ -95,7 +94,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
if T*(200+3*D) < t*(200+12*D) {
epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
epochsForBalanceTopUps := N * (200 + 3*D) / (600 * Delta)
- wsp += math.Max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
+ wsp += max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
} else {
wsp += 3 * N * D * t / (200 * Delta * (T - t))
}
@@ -125,11 +125,12 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
}

func TestReconstructBlobs(t *testing.T) {
- // Start the trusted setup.
- err := kzg.Start()
- require.NoError(t, err)
+ params.SetupTestConfigCleanup(t)
+ params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
+ require.NoError(t, kzg.Start())
var emptyBlock blocks.ROBlock
+ fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

t.Run("no index", func(t *testing.T) {
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)

@@ -190,10 +191,10 @@
})

t.Run("not committed to the same block", func(t *testing.T) {
- _, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
- roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
+ _, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
+ roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))

- _, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
+ _, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
})
@@ -16,61 +16,60 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fs := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
fuluMax := params.BeaconConfig().MaxBlobsPerBlock(fs)
t.Run("pre fulu", func(t *testing.T) {
- block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
+ block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, 0)
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.NoError(t, err)
})

- t.Run("too many commitmnets", func(t *testing.T) {
- params.SetupTestConfigCleanup(t)
- config := params.BeaconConfig()
- config.BlobSchedule = []params.BlobScheduleEntry{{}}
- params.OverrideBeaconConfig(config)
- block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
+ t.Run("too many commitments", func(t *testing.T) {
+ block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, fuluMax+1, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
})

t.Run("root mismatch", func(t *testing.T) {
- _, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
- block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
+ _, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
+ block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrRootMismatch)
})

t.Run("column size mismatch", func(t *testing.T) {
- block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+ block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("KZG commitments size mismatch", func(t *testing.T) {
- block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+ block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("KZG proofs mismatch", func(t *testing.T) {
- block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+ block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("commitment mismatch", func(t *testing.T) {
- block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
- _, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+ block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
+ _, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})

t.Run("nominal", func(t *testing.T) {
- block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
+ block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.NoError(t, err)
})
@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
- mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -60,7 +59,7 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
_ = err

// Apply minimum balance as per spec
- exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
+ exitInfo.TotalActiveBalance = max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}
@@ -18,13 +18,16 @@ import (
)

func Test_commitmentsToCheck(t *testing.T) {
+ params.SetupTestConfigCleanup(t)
+ params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
+ fulu := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
- commits := [][]byte{
- bytesutil.PadTo([]byte("a"), 48),
- bytesutil.PadTo([]byte("b"), 48),
- bytesutil.PadTo([]byte("c"), 48),
- bytesutil.PadTo([]byte("d"), 48),
+ windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
+ maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
+ commits := make([][]byte, maxBlobs+1)
+ for i := 0; i < len(commits); i++ {
+ commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
+ }
cases := []struct {
name string

@@ -47,41 +50,40 @@ func Test_commitmentsToCheck(t *testing.T) {
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
- d := util.NewBeaconBlockDeneb()
- d.Block.Body.BlobKzgCommitments = commits
- d.Block.Slot = 100
+ d := util.NewBeaconBlockFulu()
+ d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
+ d.Block.Slot = fulu + 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
- commits: commits,
- slot: 100,
+ commits: commits[:maxBlobs],
+ slot: fulu + 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
- d := util.NewBeaconBlockDeneb()
+ d := util.NewBeaconBlockFulu()
+ d.Block.Slot = fulu
// block is from slot 0, "current slot" is window size +1 (so outside the window)
- d.Block.Body.BlobKzgCommitments = commits
+ d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
- slot: windowSlots + 1,
+ slot: fulu + windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
- d := util.NewBeaconBlockDeneb()
- d.Block.Slot = 100
+ d := util.NewBeaconBlockFulu()
+ d.Block.Slot = fulu + 100
// block is from slot 0, "current slot" is window size +1 (so outside the window)
- d.Block.Body.BlobKzgCommitments = commits
+ // Double the number of commitments, assert that this is over the limit
+ d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
@@ -115,67 +117,69 @@ func Test_commitmentsToCheck(t *testing.T) {
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

- blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
+ blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
- require.NoError(t, as.Persist(1, blobSidecars[2]))
- err := as.IsDataAvailable(ctx, 1, blk)
+ require.NoError(t, as.Persist(ds, blobSidecars[2]))
+ err := as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All but one persisted, return missing idx
- require.NoError(t, as.Persist(1, blobSidecars[0]))
- err = as.IsDataAvailable(ctx, 1, blk)
+ require.NoError(t, as.Persist(ds, blobSidecars[0]))
+ err = as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All persisted, return nil
- require.NoError(t, as.Persist(1, blobSidecars...))
+ require.NoError(t, as.Persist(ds, blobSidecars...))

- require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
+ require.NoError(t, as.IsDataAvailable(ctx, ds, blk))
}

func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

- blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
+ blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
- require.NoError(t, as.Persist(1, blobSidecars[0]))
- err := as.IsDataAvailable(ctx, 1, blk)
+ require.NoError(t, as.Persist(ds, blobSidecars[0]))
+ err := as.IsDataAvailable(ctx, ds, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}

func TestLazyPersistOnceCommitted(t *testing.T) {
- _, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+ _, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
- require.NoError(t, as.Persist(1, blobSidecars...))
+ require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
- require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
+ require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
- require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
- _, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
+ require.ErrorIs(t, as.Persist(ds, blobSidecars[0]), errIndexOutOfBounds)
+ _, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 4)

// ignores sidecars before the retention period
- slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
- require.NoError(t, err)
- require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
+ slotOOB := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
+ slotOOB += ds + 32
+ require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
- require.NoError(t, as.Persist(1, moreBlobSidecars...))
+ require.NoError(t, as.Persist(ds, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {
@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry := &blobCacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
- sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
+ sumz := filesystem.NewMockBlobStorageSummarizer(t, slots.ToEpoch(slot), od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
@@ -21,7 +21,8 @@ import (
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
- _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
+ ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+ _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)

t.Run("no error for duplicate", func(t *testing.T) {

@@ -127,21 +128,22 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}

func TestBlobIndicesBounds(t *testing.T) {
+ es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fs := afero.NewMemMapFs()
root := [32]byte{}

- okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
- writeFakeSSZ(t, fs, root, 0, okIdx)
+ okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es)) - 1
+ writeFakeSSZ(t, fs, root, es, okIdx)
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
indices := bs.Summary(root).mask
- expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
+ expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}

- oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
- writeFakeSSZ(t, fs, root, 0, oobIdx)
+ oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es))
+ writeFakeSSZ(t, fs, root, es, oobIdx)
// This now fails at cache warmup time.
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}
@@ -6,14 +6,17 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
+ "github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestSlotByRoot_Summary(t *testing.T) {
- noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
- allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
- firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
- lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
- oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
+ ee := params.BeaconConfig().ElectraForkEpoch
+ es := util.SlotAtEpoch(t, ee)
+ noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+ allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+ firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+ lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
+ oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true

@@ -53,7 +56,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
- sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
+ sc.cache[key] = BlobStorageSummary{epoch: ee, mask: c.expected}
}
}
for _, c := range cases {

@@ -73,6 +76,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
}

func TestAllAvailable(t *testing.T) {
+ es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {

@@ -125,13 +129,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
- count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
+ count: params.BeaconConfig().MaxBlobsPerBlock(es) + 1,
aa: false,
},
{
name: "max present",
- count: params.BeaconConfig().MaxBlobsPerBlock(0),
- idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
+ count: params.BeaconConfig().MaxBlobsPerBlock(es),
+ idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(es)),
aa: true,
},
{

@@ -143,7 +147,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
+ mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
for _, idx := range c.idxSet {
mask[idx] = true
}
@@ -11,6 +11,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
+ "github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"

@@ -60,12 +61,13 @@ func TestRootFromDir(t *testing.T) {
}

func TestSlotFromFile(t *testing.T) {
+ es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
cases := []struct {
slot primitives.Slot
}{
- {slot: 0},
- {slot: 2},
- {slot: 1123581321},
+ {slot: es + 0},
+ {slot: es + 2},
+ {slot: es + 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {

@@ -243,39 +245,40 @@ func TestSlotFromBlob(t *testing.T) {
}

func TestIterationComplete(t *testing.T) {
+ de := params.BeaconConfig().DenebForkEpoch
targets := []migrationTestTarget{
{
- ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
- path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
+ ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
+ path: "by-epoch/%d/%d/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
- ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
+ ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
- path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
+ path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
- ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
+ ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
- path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
+ path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
- ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
+ ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 0),
slotOffset: 16,
- path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
+ path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
{
- ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
+ ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 1),
slotOffset: 16,
- path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
+ path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
},
{
- ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
+ ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", -1+math.MaxUint64/32, 0),
slotOffset: 16,
- path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
+ path: "by-epoch/%d/%d/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
},
{
- ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
- path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
+ ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
+ path: "by-epoch/%d/%d/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
},
}
fs := afero.NewMemMapFs()

@@ -299,6 +302,7 @@ func TestIterationComplete(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, tar.ident.epoch, entry.epoch)
require.Equal(t, true, entry.HasIndex(tar.ident.index))
- require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
+ path := fmt.Sprintf(tar.path, periodForEpoch(tar.ident.epoch), tar.ident.epoch)
+ require.Equal(t, path, byEpoch.sszPath(tar.ident))
}
}
@@ -4,10 +4,10 @@ import (
"os"
"testing"

+ "github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
- "github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/spf13/afero"
)

@@ -18,9 +18,7 @@ func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64)
}

func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
- slot, err := slots.EpochStart(ident.epoch)
- require.NoError(t, err)
- slot += offset
+ slot := util.SlotAtEpoch(t, ident.epoch) + offset
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
scb, err := sc[0].MarshalSSZ()
require.NoError(t, err)

@@ -53,6 +51,7 @@ func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, af
}

func TestMigrations(t *testing.T) {
+ de := params.BeaconConfig().DenebForkEpoch
cases := []struct {
name string
forwardLayout string

@@ -65,18 +64,18 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
- ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
+ ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
- ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
+ ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
- ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
+ ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
- ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
+ ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
},
},

@@ -87,33 +86,33 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
- ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
+ ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
- ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
+ ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
- ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
+ ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
- ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
+ ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
migrated: true,
},
{
- ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
+ ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 1),
slotOffset: 16,
migrated: true,
},
{
- ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
+ ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", de+16777217, 0),
slotOffset: 16,
migrated: true,
},
{
- ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
+ ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
migrated: true,
},
},
@@ -88,11 +88,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
return &BlobMocker{fs: fs, bs: bs}, bs
}

- func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
+ func NewMockBlobStorageSummarizer(t *testing.T, epoch primitives.Epoch, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
- if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
+ if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
t.Fatal(err)
}
}

@@ -142,6 +142,7 @@ func testRoots(n int) [][32]byte {
}

func TestLayoutPruneBefore(t *testing.T) {
+ electra := params.BeaconConfig().ElectraForkEpoch
roots := testRoots(10)
cases := []struct {
name string

@@ -153,27 +154,27 @@ func TestLayoutPruneBefore(t *testing.T) {
}{
{
name: "none pruned",
- pruneBefore: 1,
+ pruneBefore: electra + 1,
pruned: []testIdent{},
remain: []testIdent{
- {offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
- {offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
+ {offset: 1, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
+ {offset: 1, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 0}},
},
},
{
name: "expected pruned before epoch",
- pruneBefore: 3,
+ pruneBefore: electra + 3,
pruned: []testIdent{
- {offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
- {offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
- {offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
- {offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
+ {offset: 0, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
+ {offset: 31, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 5}},
+ {offset: 0, blobIdent: blobIdent{root: roots[2], epoch: electra + 2, index: 0}},
+ {offset: 31, blobIdent: blobIdent{root: roots[3], epoch: electra + 2, index: 3}},
},
remain: []testIdent{
- {offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}}, // boundary
- {offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
- {offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
- {offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
+ {offset: 0, blobIdent: blobIdent{root: roots[4], epoch: electra + 3, index: 2}}, // boundary
+ {offset: 31, blobIdent: blobIdent{root: roots[5], epoch: electra + 3, index: 0}}, // boundary
+ {offset: 0, blobIdent: blobIdent{root: roots[6], epoch: electra + 4, index: 1}},
+ {offset: 31, blobIdent: blobIdent{root: roots[7], epoch: electra + 4, index: 5}},
},
sum: pruneSummary{blobsPruned: 4},
},
@@ -954,7 +954,9 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
deletedRoots := make([][32]byte, 0)

oRoot, err := s.OriginCheckpointBlockRoot(ctx)
- if err != nil {
+ if err != nil && !errors.Is(err, ErrNotFoundOriginBlockRoot) {
+ // If the node did not use checkpoint sync, there will be no origin block root.
+ // Use zero hash which will never match any actual state root
return err
}
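The change above stops treating a missing origin checkpoint root as fatal: a node that never ran checkpoint sync has no origin root, so only other errors abort the cleanup, and the zero-valued root simply never matches a stored state root. A small, self-contained sketch of this errors.Is sentinel pattern; the names below are invented:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for a sentinel like ErrNotFoundOriginBlockRoot.
var errNotFound = errors.New("origin block root not found")

func originRoot(haveOrigin bool) ([32]byte, error) {
	if !haveOrigin {
		return [32]byte{}, errNotFound
	}
	return [32]byte{'a'}, nil
}

func cleanup(haveOrigin bool) error {
	oRoot, err := originRoot(haveOrigin)
	// Only unexpected errors abort; the "not found" sentinel is tolerated and
	// leaves oRoot as the zero hash, which never matches a stored state root.
	if err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	fmt.Printf("proceeding with origin root %x...\n", oRoot[:4])
	return nil
}

func main() {
	_ = cleanup(false) // genesis / no checkpoint sync: proceeds with the zero root
	_ = cleanup(true)  // checkpoint sync: proceeds with the real root
}
```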
@@ -10,6 +10,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
+ fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"

@@ -1283,3 +1284,50 @@ func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b,

func BenchmarkState_CheckStateReadTime_1(b *testing.B) { checkStateReadTime(b, 1) }
func BenchmarkState_CheckStateReadTime_10(b *testing.B) { checkStateReadTime(b, 10) }

+ func TestStore_CleanUpDirtyStates_NoOriginRoot(t *testing.T) {
+ // This test verifies that CleanUpDirtyStates does not fail when the origin block root is not set,
+ // which can happen when starting from genesis or in certain fork scenarios like Fulu.
+ db := setupDB(t)
+ genesisState, err := util.NewBeaconState()
+ require.NoError(t, err)
+ genesisRoot := [fieldparams.RootLength]byte{'a'}
+ require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
+ require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
+ // Note: We intentionally do NOT call SaveOriginCheckpointBlockRoot here
+ // to simulate the scenario where origin block root is not set
+ slotsPerArchivedPoint := primitives.Slot(128)
+ bRoots := make([][fieldparams.RootLength]byte, 0)
+ prevRoot := genesisRoot
+ for i := primitives.Slot(1); i <= slotsPerArchivedPoint; i++ { // skip slot 0
+ b := util.NewBeaconBlock()
+ b.Block.Slot = i
+ b.Block.ParentRoot = prevRoot[:]
+ r, err := b.Block.HashTreeRoot()
+ require.NoError(t, err)
+ wsb, err := blocks.NewSignedBeaconBlock(b)
+ require.NoError(t, err)
+ require.NoError(t, db.SaveBlock(t.Context(), wsb))
+ bRoots = append(bRoots, r)
+ prevRoot = r
+ st, err := util.NewBeaconState()
+ require.NoError(t, err)
+ require.NoError(t, st.SetSlot(i))
+ require.NoError(t, db.SaveState(t.Context(), st, r))
+ }
+ require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), &ethpb.Checkpoint{
+ Root: bRoots[len(bRoots)-1][:],
+ Epoch: primitives.Epoch(slotsPerArchivedPoint / params.BeaconConfig().SlotsPerEpoch),
+ }))
+ // This should not fail even though origin block root is not set
+ err = db.CleanUpDirtyStates(t.Context(), slotsPerArchivedPoint)
+ require.NoError(t, err)
+ // Verify that cleanup still works correctly
+ for i, root := range bRoots {
+ if primitives.Slot(i) >= slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3)) {
+ require.Equal(t, true, db.HasState(t.Context(), root))
+ } else {
+ require.Equal(t, false, db.HasState(t.Context(), root))
+ }
+ }
+ }
@@ -264,9 +264,11 @@ func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdat

func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
- if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
- log.WithError(err).Error("Could not broadcast light client finality update")
- }
+ go func() {
+ if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
+ log.WithError(err).Error("Could not broadcast light client finality update")
+ }
+ }()
}

s.lastFinalityUpdate = update

@@ -294,9 +296,11 @@ func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticU

func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
if broadcast {
- if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
- log.WithError(err).Error("Could not broadcast light client optimistic update")
- }
+ go func() {
+ if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
+ log.WithError(err).Error("Could not broadcast light client optimistic update")
+ }
+ }()
}

s.lastOptimisticUpdate = update
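Both setters now run the P2P broadcast in a goroutine so that updating the light client store does not block on network I/O; that is also why the test below introduces a short sleep before asserting on BroadcastCalled. A minimal sketch of the same fire-and-forget pattern; the types and names are invented:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// broadcaster stands in for the p2p interface used by the light client store.
type broadcaster struct{ called atomic.Bool }

func (b *broadcaster) Broadcast(ctx context.Context, update string) error {
	b.called.Store(true)
	return nil
}

type store struct{ p2p *broadcaster }

// setLastUpdate saves the update and broadcasts it without blocking the caller.
// The goroutine cannot return an error, so failures can only be logged.
func (s *store) setLastUpdate(update string) {
	go func() {
		if err := s.p2p.Broadcast(context.Background(), update); err != nil {
			fmt.Println("could not broadcast update:", err)
		}
	}()
}

func main() {
	s := &store{p2p: &broadcaster{}}
	s.setLastUpdate("finality-update")
	time.Sleep(10 * time.Millisecond) // give the goroutine time to run, as the test does
	fmt.Println("broadcast called:", s.p2p.called.Load())
}
```

Since the error can no longer reach the caller, tests have to wait (or synchronize some other way) before asserting that the broadcast actually happened.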
@@ -3,6 +3,7 @@ package light_client
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
@@ -74,6 +75,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
|
||||
|
||||
timeForGoroutinesToFinish := 20 * time.Microsecond
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
@@ -85,6 +87,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -99,6 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -113,6 +117,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -127,6 +132,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
@@ -140,6 +146,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -154,6 +161,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
@@ -167,6 +175,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
|
||||
|
||||
|
||||
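The assertions above rely on a fixed 20µs sleep (timeForGoroutinesToFinish) before reading BroadcastCalled, which can be flaky if the broadcast goroutine is scheduled late. A polling helper is one alternative; this is only an illustrative sketch, not part of the change:

package lightclient

import (
	"testing"
	"time"
)

// waitFor polls cond until it returns true or the timeout elapses, failing the test otherwise.
func waitFor(t *testing.T, cond func() bool, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return
		}
		time.Sleep(time.Millisecond)
	}
	t.Fatal("condition not met before timeout")
}

For the positive cases the sleep could become waitFor(t, p2p.BroadcastCalled.Load, time.Second); the negative cases would still need a short grace period.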
@@ -1,7 +1,6 @@
|
||||
package blstoexec
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
@@ -87,7 +86,7 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
|
||||
func (p *Pool) BLSToExecChangesForInclusion(st state.ReadOnlyBeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
|
||||
length := int(min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
|
||||
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
|
||||
node := p.pending.Last()
|
||||
for node != nil && len(result) < length {
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package voluntaryexits
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
@@ -63,7 +62,7 @@ func (p *Pool) PendingExits() ([]*ethpb.SignedVoluntaryExit, error) {
|
||||
// return more than the block enforced MaxVoluntaryExits.
|
||||
func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
|
||||
p.lock.RLock()
|
||||
length := int(math.Min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
|
||||
length := int(min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
|
||||
result := make([]*ethpb.SignedVoluntaryExit, 0, length)
|
||||
node := p.pending.First()
|
||||
for node != nil && len(result) < length {
|
||||
|
||||
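Both pool hunks replace math.Min with the Go 1.21 built-in min but keep the round trip through float64. Since the built-in is generic over ordered types, the conversions could be dropped entirely; a sketch of that simplification (an assumption, not what the diff does):

package pool

// inclusionLimit caps the number of pending items that fit into a block.
// maxPerBlock and pendingLen stand in for the config limit and p.pending.Len().
func inclusionLimit(maxPerBlock, pendingLen int) int {
	return min(maxPerBlock, pendingLen)
}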
@@ -139,6 +139,7 @@ go_test(
|
||||
"sender_test.go",
|
||||
"service_test.go",
|
||||
"subnets_test.go",
|
||||
"topics_test.go",
|
||||
"utils_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -278,6 +278,20 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
|
||||
return errors.New("attempted to broadcast nil light client optimistic update")
|
||||
}
|
||||
|
||||
// add delay to ensure block has time to propagate
|
||||
slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not compute slot start time")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
timeSinceSlotStart := time.Since(slotStart)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if timeSinceSlotStart < expectedDelay {
|
||||
waitDuration := expectedDelay - timeSinceSlotStart
|
||||
<-time.After(waitDuration)
|
||||
}
|
||||
|
||||
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
|
||||
@@ -298,6 +312,20 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return errors.New("attempted to broadcast nil light client finality update")
|
||||
}
|
||||
|
||||
// add delay to ensure block has time to propagate
|
||||
slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not compute slot start time")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
timeSinceSlotStart := time.Since(slotStart)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if timeSinceSlotStart < expectedDelay {
|
||||
waitDuration := expectedDelay - timeSinceSlotStart
|
||||
<-time.After(waitDuration)
|
||||
}
|
||||
|
||||
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client finality update")
|
||||
|
||||
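Both broadcast functions above apply the propagation delay with a bare <-time.After(waitDuration), which keeps sleeping even if the caller's context is cancelled. A context-aware variant would select on both channels; a sketch, offered only as an alternative:

package p2p

import (
	"context"
	"time"
)

// waitUntil blocks until target or until ctx is done, whichever comes first.
func waitUntil(ctx context.Context, target time.Time) error {
	d := time.Until(target)
	if d <= 0 {
		return nil
	}
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}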
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -529,6 +530,11 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.SyncMessageDueBPS = 60 // ~72 milliseconds
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
@@ -539,7 +545,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisTime: time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
@@ -566,12 +572,19 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
|
||||
require.NoError(t, err)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if time.Now().Before(slotStartTime.Add(expectedDelay)) {
|
||||
tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
|
||||
}
|
||||
|
||||
result := ðpb.LightClientOptimisticUpdateAltair{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
if !proto.Equal(result, msg.Proto()) {
|
||||
@@ -593,6 +606,11 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.SyncMessageDueBPS = 60 // ~72 milliseconds
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
@@ -603,7 +621,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisTime: time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
@@ -630,12 +648,19 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
|
||||
require.NoError(t, err)
|
||||
expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
|
||||
if time.Now().Before(slotStartTime.Add(expectedDelay)) {
|
||||
tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
|
||||
}
|
||||
|
||||
result := ðpb.LightClientFinalityUpdateAltair{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
if !proto.Equal(result, msg.Proto()) {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -10,32 +12,28 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var errNoCustodyInfo = errors.New("no custody info available")
|
||||
|
||||
var _ CustodyManager = (*Service)(nil)
|
||||
|
||||
// EarliestAvailableSlot returns the earliest available slot.
|
||||
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
s.custodyInfoLock.RLock()
|
||||
defer s.custodyInfoLock.RUnlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errors.New("no custody info available")
|
||||
// It blocks until the custody info is set or the context is done.
|
||||
func (s *Service) EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error) {
|
||||
custodyInfo, err := s.waitForCustodyInfo(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "wait for custody info")
|
||||
}
|
||||
|
||||
return s.custodyInfo.earliestAvailableSlot, nil
|
||||
return custodyInfo.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount returns the custody group count.
|
||||
func (s *Service) CustodyGroupCount() (uint64, error) {
|
||||
s.custodyInfoLock.Lock()
|
||||
defer s.custodyInfoLock.Unlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errNoCustodyInfo
|
||||
// It blocks until the custody info is set or the context is done.
|
||||
func (s *Service) CustodyGroupCount(ctx context.Context) (uint64, error) {
|
||||
custodyInfo, err := s.waitForCustodyInfo(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "wait for custody info")
|
||||
}
|
||||
|
||||
return s.custodyInfo.groupCount, nil
|
||||
return custodyInfo.groupCount, nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfo updates the stored custody group count to the incoming one
|
||||
@@ -79,6 +77,9 @@ func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
|
||||
earliestAvailableSlot: earliestAvailableSlot,
|
||||
groupCount: custodyGroupCount,
|
||||
}
|
||||
|
||||
close(s.custodyInfoSet)
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
@@ -147,6 +148,33 @@ func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
|
||||
return custodyCount
|
||||
}
|
||||
|
||||
func (s *Service) waitForCustodyInfo(ctx context.Context) (custodyInfo, error) {
|
||||
select {
|
||||
case <-s.custodyInfoSet:
|
||||
info, ok := s.copyCustodyInfo()
|
||||
if !ok {
|
||||
return custodyInfo{}, errors.New("custody info was set but is nil")
|
||||
}
|
||||
|
||||
return info, nil
|
||||
case <-ctx.Done():
|
||||
return custodyInfo{}, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// copyCustodyInfo returns a copy of the current custody info in a thread-safe manner.
|
||||
// If no custody info is set, it returns false as the second return value.
|
||||
func (s *Service) copyCustodyInfo() (custodyInfo, bool) {
|
||||
s.custodyInfoLock.RLock()
|
||||
defer s.custodyInfoLock.RUnlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return custodyInfo{}, false
|
||||
}
|
||||
|
||||
return *s.custodyInfo, true
|
||||
}
|
||||
|
||||
// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
|
||||
// If the ENR is not available, it defaults to the minimum number of custody groups
|
||||
// an honest node custodies and serves samples from.
|
||||
|
||||
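waitForCustodyInfo and copyCustodyInfo above implement the usual "closed channel as a one-shot signal" idiom: UpdateCustodyInfo closes custodyInfoSet once, after which every waiter falls straight through the select. A stripped-down sketch of the idiom with placeholder types, not Prysm's custodyInfo struct:

package p2p

import (
	"context"
	"sync"
)

type ready struct {
	once  sync.Once
	set   chan struct{}
	mu    sync.RWMutex
	value uint64
}

func newReady() *ready { return &ready{set: make(chan struct{})} }

// Set publishes the value and unblocks all current and future waiters.
func (r *ready) Set(v uint64) {
	r.mu.Lock()
	r.value = v
	r.mu.Unlock()
	r.once.Do(func() { close(r.set) })
}

// Get blocks until Set has been called or ctx is done.
func (r *ready) Get(ctx context.Context) (uint64, error) {
	select {
	case <-r.set:
		r.mu.RLock()
		defer r.mu.RUnlock()
		return r.value, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

The sync.Once only makes the sketch safe to call Set more than once; whether such a guard is needed for UpdateCustodyInfo depends on its callers.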
@@ -20,58 +20,37 @@ import (
|
||||
)
|
||||
|
||||
func TestEarliestAvailableSlot(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
_, err := service.EarliestAvailableSlot()
|
||||
service := &Service{
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
close(service.custodyInfoSet)
|
||||
slot, err := service.EarliestAvailableSlot(t.Context())
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
slot, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
}
|
||||
|
||||
func TestCustodyGroupCount(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
const expected uint64 = 5
|
||||
|
||||
_, err := service.CustodyGroupCount()
|
||||
service := &Service{
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
|
||||
})
|
||||
close(service.custodyInfoSet)
|
||||
count, err := service.CustodyGroupCount(t.Context())
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected uint64 = 5
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
count, err := service.CustodyGroupCount()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
@@ -163,7 +142,8 @@ func TestUpdateCustodyInfo(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
}
|
||||
|
||||
slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)
|
||||
|
||||
@@ -253,7 +253,7 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
return
|
||||
}
|
||||
|
||||
custodyGroupCount, err = s.CustodyGroupCount()
|
||||
custodyGroupCount, err = s.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve custody group count")
|
||||
return
|
||||
@@ -604,27 +604,13 @@ func (s *Service) createLocalNode(
|
||||
localNode = initializeSyncCommSubnets(localNode)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
// TODO: Replace this quick fix with a proper synchronization scheme (chan?)
|
||||
const delay = 1 * time.Second
|
||||
|
||||
var custodyGroupCount uint64
|
||||
|
||||
err := errNoCustodyInfo
|
||||
for errors.Is(err, errNoCustodyInfo) {
|
||||
custodyGroupCount, err = s.CustodyGroupCount()
|
||||
if errors.Is(err, errNoCustodyInfo) {
|
||||
log.WithField("delay", delay).Debug("No custody info available yet, retrying later")
|
||||
time.Sleep(delay)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
custodyGroupCount, err := s.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
|
||||
if s.cfg != nil && s.cfg.HostAddress != "" {
|
||||
|
||||
@@ -281,9 +281,13 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: tt.cfg,
|
||||
ctx: t.Context(),
|
||||
custodyInfo: &custodyInfo{groupCount: custodyRequirement},
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
}
|
||||
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
|
||||
if tt.expectedError {
|
||||
require.NotNil(t, err)
|
||||
@@ -912,9 +916,13 @@ func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
ctx: t.Context(),
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
custodyInfo: &custodyInfo{groupCount: custodyGroupCount},
|
||||
}
|
||||
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
// Set the listener and the metadata.
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return service.createListener(nil, privateKey)
|
||||
|
||||
@@ -79,6 +79,9 @@ func compareForkENR(self, peer *enr.Record) error {
|
||||
// we allow the connection to continue until the fork boundary.
|
||||
return nil
|
||||
}
|
||||
if selfEntry.NextForkEpoch == params.BeaconConfig().FarFutureEpoch {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Since we agree on the next fork epoch, we require next fork version to also be in agreement.
|
||||
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {
|
||||
|
||||
@@ -122,6 +122,29 @@ func TestCompareForkENR(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIgnoreFarFutureMismatch(t *testing.T) {
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
_, k := createAddrAndPrivKey(t)
|
||||
current := params.GetNetworkScheduleEntry(params.BeaconConfig().ElectraForkEpoch)
|
||||
next := params.NetworkScheduleEntry{
|
||||
Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ForkDigest: [4]byte{0xFF, 0xFF, 0xFF, 0xFF}, // Ensure a unique digest for testing.
|
||||
ForkVersion: [4]byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
}
|
||||
self := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(self, current, next))
|
||||
|
||||
peerNext := params.NetworkScheduleEntry{
|
||||
Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ForkDigest: [4]byte{0xAA, 0xAA, 0xAA, 0xAA}, // Different unique digest for testing.
|
||||
ForkVersion: [4]byte{0xAA, 0xAA, 0xAA, 0xAA},
|
||||
}
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(peer, current, peerNext))
|
||||
require.NoError(t, compareForkENR(self.Node().Record(), peer.Node().Record()))
|
||||
}
|
||||
|
||||
func TestNfdSetAndLoad(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
|
||||
|
||||
@@ -123,8 +123,8 @@ type (
|
||||
|
||||
// CustodyManager abstracts some data columns related methods.
|
||||
CustodyManager interface {
|
||||
EarliestAvailableSlot() (primitives.Slot, error)
|
||||
CustodyGroupCount() (uint64, error)
|
||||
EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error)
|
||||
CustodyGroupCount(ctx context.Context) (uint64, error)
|
||||
UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
CustodyGroupCountFromPeer(peer.ID) uint64
|
||||
}
|
||||
|
||||
@@ -198,9 +198,11 @@ func (s *Service) updateMetrics() {
|
||||
overallScore := s.peers.Scorers().Score(pid)
|
||||
peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
|
||||
}
|
||||
connectedPeersCount.Reset() // Clear out previous results.
|
||||
for agent, total := range numConnectedPeersByClient {
|
||||
connectedPeersCount.WithLabelValues(agent).Set(total)
|
||||
}
|
||||
avgScoreConnectedClients.Reset() // Clear out previous results.
|
||||
for agent, scoringData := range peerScoresByClient {
|
||||
avgScore := average(scoringData)
|
||||
avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)
|
||||
|
||||
@@ -24,7 +24,6 @@ package peers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -411,7 +410,7 @@ func (p *Status) RandomizeBackOff(pid peer.ID) {
|
||||
return
|
||||
}
|
||||
|
||||
duration := time.Duration(math.Max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
|
||||
duration := time.Duration(max(MinBackOffDuration, float64(p.rand.Intn(MaxBackOffDuration)))) * time.Millisecond
|
||||
peerData.NextValidTime = time.Now().Add(duration)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
mathutil "github.com/OffchainLabs/prysm/v6/math"
|
||||
pbrpc "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
@@ -135,13 +134,15 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
|
||||
|
||||
// pubsubOptions creates a list of options to configure our router with.
|
||||
func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
filt := pubsub.NewAllowlistSubscriptionFilter(s.allTopicStrings()...)
|
||||
filt = pubsub.WrapLimitSubscriptionFilter(filt, pubsubSubscriptionRequestLimit)
|
||||
psOpts := []pubsub.Option{
|
||||
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
|
||||
pubsub.WithNoAuthor(),
|
||||
pubsub.WithMessageIdFn(func(pmsg *pubsubpb.Message) string {
|
||||
return MsgID(s.genesisValidatorsRoot, pmsg)
|
||||
}),
|
||||
pubsub.WithSubscriptionFilter(s),
|
||||
pubsub.WithSubscriptionFilter(filt),
|
||||
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
|
||||
pubsub.WithMaxMessageSize(int(MaxMessageSize())), // lint:ignore uintcast -- Max Message Size is a config value and is naturally bounded by networking limitations.
|
||||
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),
|
||||
@@ -246,5 +247,5 @@ func ExtractGossipDigest(topic string) ([4]byte, error) {
|
||||
// # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small.
|
||||
// return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024)
|
||||
func MaxMessageSize() uint64 {
|
||||
return mathutil.Max(encoder.MaxCompressedLen(params.BeaconConfig().MaxPayloadSize)+1024, 1024*1024)
|
||||
return max(encoder.MaxCompressedLen(params.BeaconConfig().MaxPayloadSize)+1024, 1024*1024)
|
||||
}
|
||||
|
||||
@@ -130,7 +130,7 @@ func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"topic": topic,
|
||||
}).Debug("Peer subscription rejected")
|
||||
}).Trace("Peer subscription rejected")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -91,6 +91,7 @@ type Service struct {
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfo *custodyInfo
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
custodyInfoSet chan struct{}
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
}
|
||||
|
||||
@@ -137,6 +138,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
peerDisconnectionTime: cache.New(1*time.Second, 1*time.Minute),
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
}
|
||||
|
||||
ipAddr := prysmnetwork.IPAddr()
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -223,8 +224,14 @@ func (s *Service) findPeersWithSubnets(
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter node")
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"nodeID": node.ID(),
|
||||
"topicFormat": topicFormat,
|
||||
}).Debug("Could not get needed subnets from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if len(nodeSubnets) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -199,12 +199,12 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
|
||||
}
|
||||
|
||||
// EarliestAvailableSlot -- fake.
|
||||
func (*FakeP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
func (*FakeP2P) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount -- fake.
|
||||
func (*FakeP2P) CustodyGroupCount() (uint64, error) {
|
||||
func (*FakeP2P) CustodyGroupCount(context.Context) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -473,7 +473,7 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
|
||||
}
|
||||
|
||||
// EarliestAvailableSlot .
|
||||
func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
func (s *TestP2P) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
|
||||
s.custodyInfoMut.RLock()
|
||||
defer s.custodyInfoMut.RUnlock()
|
||||
|
||||
@@ -481,7 +481,7 @@ func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
}
|
||||
|
||||
// CustodyGroupCount .
|
||||
func (s *TestP2P) CustodyGroupCount() (uint64, error) {
|
||||
func (s *TestP2P) CustodyGroupCount(context.Context) (uint64, error) {
|
||||
s.custodyInfoMut.RLock()
|
||||
defer s.custodyInfoMut.RUnlock()
|
||||
|
||||
|
||||
@@ -1,5 +1,15 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"slices"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
)
|
||||
|
||||
const (
|
||||
// GossipProtocolAndDigest represents the protocol and fork digest prefix in a gossip topic.
|
||||
GossipProtocolAndDigest = "/eth2/%x/"
|
||||
@@ -66,3 +76,129 @@ const (
|
||||
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
|
||||
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
|
||||
)
|
||||
|
||||
// topic is a struct representing a single gossipsub topic.
|
||||
// It can also be used to represent a set of subnet topics: see appendSubnetsBelow().
|
||||
// topic is intended to be used as an immutable value - it is hashable so it can be used as a map key
|
||||
// and it uses strings in order to leverage Go's string interning for memory efficiency.
|
||||
type topic struct {
|
||||
full string
|
||||
digest string
|
||||
message string
|
||||
start primitives.Epoch
|
||||
end primitives.Epoch
|
||||
suffix string
|
||||
subnet uint64
|
||||
}
|
||||
|
||||
func (t topic) String() string {
|
||||
return t.full
|
||||
}
|
||||
|
||||
// sszEnc is used to get the protocol suffix for topics. This value has been effectively hardcoded
|
||||
// since phase0.
|
||||
var sszEnc = &encoder.SszNetworkEncoder{}
|
||||
|
||||
// newTopic constructs a topic value for an ordinary topic structure (without subnets).
|
||||
func newTopic(start, end primitives.Epoch, digest [4]byte, message string) topic {
|
||||
suffix := sszEnc.ProtocolSuffix()
|
||||
t := topic{digest: hex.EncodeToString(digest[:]), message: message, start: start, end: end, suffix: suffix}
|
||||
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + t.suffix
|
||||
return t
|
||||
}
|
||||
|
||||
// newSubnetTopic constructs a topic value for a topic with a subnet structure.
|
||||
func newSubnetTopic(start, end primitives.Epoch, digest [4]byte, message string, subnet uint64) topic {
|
||||
t := newTopic(start, end, digest, message)
|
||||
t.subnet = subnet
|
||||
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + "_" + strconv.Itoa(int(t.subnet)) + t.suffix
|
||||
return t
|
||||
}
|
||||
|
||||
// allTopicStrings returns the full topic string for all topics
|
||||
// that could be derived from the current fork schedule.
|
||||
func (s *Service) allTopicStrings() []string {
|
||||
topics := s.allTopics()
|
||||
topicStrs := make([]string, 0, len(topics))
|
||||
for _, t := range topics {
|
||||
topicStrs = append(topicStrs, t.String())
|
||||
}
|
||||
return topicStrs
|
||||
}
|
||||
|
||||
// appendSubnetsBelow uses the value of top.subnet as the subnet count
|
||||
// and creates a topic value for each subnet less than the subnet count, appending them all
|
||||
// to appendTo.
|
||||
func appendSubnetsBelow(top topic, digest [4]byte, appendTo []topic) []topic {
|
||||
for i := range top.subnet {
|
||||
appendTo = append(appendTo, newSubnetTopic(top.start, top.end, digest, top.message, i))
|
||||
}
|
||||
return appendTo
|
||||
}
|
||||
|
||||
// allTopics returns all topics that could be derived from the current fork schedule.
|
||||
func (s *Service) allTopics() []topic {
|
||||
cfg := params.BeaconConfig()
|
||||
// bellatrix: no special topics; electra: blob topics are handled all together below
|
||||
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
|
||||
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
|
||||
// Templates are starter topics - they have a placeholder digest and the subnet is set to the maximum value
|
||||
// for the subnet (see how this is used in appendSubnetsBelow). These are not directly returned by the method,
|
||||
// they are copied and modified for each digest where they apply based on the start and end epochs.
|
||||
empty := [4]byte{0, 0, 0, 0} // empty digest for templates, replaced by real digests in per-fork copies.
|
||||
templates := []topic{
|
||||
newTopic(genesis, future, empty, GossipBlockMessage),
|
||||
newTopic(genesis, future, empty, GossipAggregateAndProofMessage),
|
||||
newTopic(genesis, future, empty, GossipExitMessage),
|
||||
newTopic(genesis, future, empty, GossipProposerSlashingMessage),
|
||||
newTopic(genesis, future, empty, GossipAttesterSlashingMessage),
|
||||
newSubnetTopic(genesis, future, empty, GossipAttestationMessage, cfg.AttestationSubnetCount),
|
||||
newSubnetTopic(altair, future, empty, GossipSyncCommitteeMessage, cfg.SyncCommitteeSubnetCount),
|
||||
newTopic(altair, future, empty, GossipContributionAndProofMessage),
|
||||
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
|
||||
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
|
||||
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
|
||||
}
|
||||
last := params.GetNetworkScheduleEntry(genesis)
|
||||
schedule := []params.NetworkScheduleEntry{last}
|
||||
for next := params.NextNetworkScheduleEntry(last.Epoch); next.ForkDigest != last.ForkDigest; next = params.NextNetworkScheduleEntry(next.Epoch) {
|
||||
schedule = append(schedule, next)
|
||||
last = next
|
||||
}
|
||||
slices.Reverse(schedule) // reverse the fork schedule because it simplifies dealing with BPOs
|
||||
fullTopics := make([]topic, 0, len(templates))
|
||||
for _, top := range templates {
|
||||
for _, entry := range schedule {
|
||||
if top.start <= entry.Epoch && entry.Epoch < top.end {
|
||||
if top.subnet > 0 { // subnet topics in the list above should set this value to the max subnet count: see appendSubnetsBelow
|
||||
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
|
||||
} else {
|
||||
fullTopics = append(fullTopics, newTopic(top.start, top.end, entry.ForkDigest, top.message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
end := future
|
||||
// We're iterating from high to low per the slices.Reverse above.
|
||||
// So we'll update end = entry.Epoch as we go down, and use that as the end for the next entry.
|
||||
// This loop either adds blob or data column sidecar topics depending on the fork.
|
||||
for _, entry := range schedule {
|
||||
if entry.Epoch < deneb {
|
||||
// note: there is a special case where deneb is the genesis fork, in which case
// we'll generate blob sidecar topics for the earlier schedule, but
// this only happens in devnets where it doesn't really matter.
break
|
||||
}
|
||||
message := GossipDataColumnSidecarMessage
|
||||
subnets := cfg.DataColumnSidecarSubnetCount
|
||||
if entry.Epoch < fulu {
|
||||
message = GossipBlobSidecarMessage
|
||||
subnets = uint64(cfg.MaxBlobsPerBlockAtEpoch(entry.Epoch))
|
||||
}
|
||||
// Set subnet to the max value; appendSubnetsBelow will iterate every index up to that value.
|
||||
top := newSubnetTopic(entry.Epoch, end, entry.ForkDigest, message, subnets)
|
||||
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
|
||||
end = entry.Epoch // These topics / subnet structures are mutually exclusive, so set each end to the next highest entry.
|
||||
}
|
||||
return fullTopics
|
||||
}
|
||||
|
||||
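For reference, the constructors above assemble exactly the strings asserted in the test file that follows. A hypothetical usage sketch inside the same package, using the ad532ceb digest from the expected list below (the protocol suffix is assumed to be /ssz_snappy, as those expectations imply):

package p2p

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

// exampleTopicStrings prints the topic strings produced by newTopic and
// newSubnetTopic for an explicitly supplied digest.
func exampleTopicStrings() {
	far := params.BeaconConfig().FarFutureEpoch
	digest := [4]byte{0xad, 0x53, 0x2c, 0xeb}
	fmt.Println(newTopic(0, far, digest, GossipBlockMessage).String())
	// /eth2/ad532ceb/beacon_block/ssz_snappy
	fmt.Println(newSubnetTopic(0, far, digest, GossipAttestationMessage, 19).String())
	// /eth2/ad532ceb/beacon_attestation_19/ssz_snappy
}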
beacon-chain/p2p/topics_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestAllTopics(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
s := &Service{}
|
||||
all := s.allTopicStrings()
|
||||
tops := map[string]struct{}{}
|
||||
for _, t := range all {
|
||||
tops[t] = struct{}{}
|
||||
}
|
||||
require.Equal(t, len(tops), len(all), "duplicate topics found")
|
||||
expected := []string{
|
||||
"/eth2/ad532ceb/sync_committee_contribution_and_proof/ssz_snappy",
|
||||
"/eth2/ad532ceb/beacon_aggregate_and_proof/ssz_snappy",
|
||||
"/eth2/ad532ceb/beacon_block/ssz_snappy",
|
||||
"/eth2/ad532ceb/bls_to_execution_change/ssz_snappy",
|
||||
"/eth2/afcaaba0/beacon_attestation_19/ssz_snappy",
|
||||
"/eth2/cc2c5cdb/data_column_sidecar_0/ssz_snappy",
|
||||
"/eth2/cc2c5cdb/data_column_sidecar_127/ssz_snappy",
|
||||
}
|
||||
forks := []primitives.Epoch{cfg.GenesisEpoch, cfg.AltairForkEpoch,
|
||||
cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch,
|
||||
cfg.ElectraForkEpoch, cfg.FuluForkEpoch}
|
||||
// sanity check: we should always have a block topic.
|
||||
// construct it by hand in case there are bugs in newTopic.
|
||||
for _, f := range forks {
|
||||
digest := params.ForkDigest(f)
|
||||
expected = append(expected, "/eth2/"+hex.EncodeToString(digest[:])+"/beacon_block/ssz_snappy")
|
||||
}
|
||||
for _, e := range expected {
|
||||
_, ok := tops[e]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
// we should have no data column subnets before fulu
|
||||
electraColumn := newSubnetTopic(cfg.ElectraForkEpoch, cfg.FuluForkEpoch,
|
||||
params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
|
||||
GossipDataColumnSidecarMessage,
|
||||
cfg.DataColumnSidecarSubnetCount-1)
|
||||
// we should have no blob sidecars before deneb or after electra
|
||||
blobBeforeDeneb := newSubnetTopic(cfg.DenebForkEpoch-1, cfg.DenebForkEpoch,
|
||||
params.ForkDigest(cfg.DenebForkEpoch-1),
|
||||
GossipBlobSidecarMessage,
|
||||
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch-1))-1)
|
||||
blobAfterElectra := newSubnetTopic(cfg.FuluForkEpoch, cfg.FarFutureEpoch,
|
||||
params.ForkDigest(cfg.FuluForkEpoch),
|
||||
GossipBlobSidecarMessage,
|
||||
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch))-1)
|
||||
unexpected := []string{
|
||||
"/eth2/cc2c5cdb/data_column_sidecar_128/ssz_snappy",
|
||||
electraColumn.String(),
|
||||
blobBeforeDeneb.String(),
|
||||
blobAfterElectra.String(),
|
||||
}
|
||||
for _, e := range unexpected {
|
||||
_, ok := tops[e]
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
}
|
||||
@@ -4876,8 +4876,16 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_validateBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
|
||||
fe := params.BeaconConfig().FuluForkEpoch
|
||||
fs := util.SlotAtEpoch(t, fe)
|
||||
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
denebMax := params.BeaconConfig().MaxBlobsPerBlock(ds)
|
||||
blob := util.GetRandBlob(123)
|
||||
// Generate proper commitment and proof for the blob
|
||||
var kzgBlob kzg.Blob
|
||||
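util.SlotAtEpoch is used throughout this rewritten test to pin block slots to the Deneb, Electra, and Fulu fork epochs instead of hard-coded slot numbers. Its implementation is not shown here; presumably it is just the first slot of the epoch (epoch × SLOTS_PER_EPOCH), as in this hedged sketch with plain uint64 types:

package testutil

// firstSlotOfEpoch is an assumed equivalent of a SlotAtEpoch-style helper:
// the first slot of an epoch is epoch * SLOTS_PER_EPOCH (32 on mainnet).
func firstSlotOfEpoch(epoch, slotsPerEpoch uint64) uint64 {
	return epoch * slotsPerEpoch
}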
@@ -4887,6 +4895,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
|
||||
require.NoError(t, err)
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -4902,10 +4911,11 @@ func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
electraMax := params.BeaconConfig().MaxBlobsPerBlock(es)
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := 0; i < electraMax+1; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
@@ -4923,6 +4933,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -4931,107 +4942,54 @@ func Test_validateBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobs(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Slot = es
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
blk.Block.Slot = es
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
// Create Fulu block with proper cell proofs
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
blk.Block.Slot = fs
|
||||
|
||||
// Generate valid commitments and cell proofs for testing
|
||||
blobCount := 2
|
||||
@@ -5075,18 +5033,8 @@ func Test_validateBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
blk.Block.Slot = fs
|
||||
|
||||
// Create valid commitments but wrong number of cell proofs
|
||||
blobCount := 2
|
||||
@@ -5123,6 +5071,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -5134,6 +5083,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -5148,53 +5098,48 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
// Set up config with BlobSchedule (BPO - Blob Parameter Only fork schedule)
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.FuluForkEpoch = 200
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
// Define blob schedule with progressive increases
|
||||
testCfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
{Epoch: fe + 1, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: fe + 10, MaxBlobsPerBlock: 5}, // Increase to 5 at fe + 10
|
||||
{Epoch: fe + 20, MaxBlobsPerBlock: 7}, // Increase to 7 at fe + 20
|
||||
{Epoch: fe + 30, MaxBlobsPerBlock: 9}, // Increase to 9 at fe + 30
|
||||
}
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
s := &Server{}
|
||||
|
||||
// Test epoch 0-9: max 3 blobs
|
||||
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
|
||||
t.Run("deneb under and over max", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 5 // Epoch 0
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:3]
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:denebMax]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
|
||||
|
||||
// Should fail with 4 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:4]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:4], proofs[:4])
|
||||
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
|
||||
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max", err)
|
||||
})
|
||||
|
||||
// Test epoch 30+: max 9 blobs
|
||||
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 960 // Epoch 30
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
t.Run("different max in electra", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = es
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
|
||||
|
||||
// Should fail with 10 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
// exceed the electra max
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
})
|
||||
|
||||
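The rewritten BPO sub-tests lean on MaxBlobsPerBlock(slot) resolving through BlobSchedule: the limit in force is the schedule entry with the highest epoch that is not after the block's epoch. A simplified sketch of that lookup (illustrative only, not Prysm's implementation):

package blobs

// blobScheduleEntry mirrors the {Epoch, MaxBlobsPerBlock} pairs configured above.
type blobScheduleEntry struct {
	Epoch            uint64
	MaxBlobsPerBlock int
}

// maxBlobsAt returns the limit in force at epoch, given a schedule sorted by
// ascending epoch, falling back to def before the first entry applies.
func maxBlobsAt(schedule []blobScheduleEntry, epoch uint64, def int) int {
	maxBlobs := def
	for _, e := range schedule {
		if e.Epoch > epoch {
			break
		}
		maxBlobs = e.MaxBlobsPerBlock
	}
	return maxBlobs
}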
@@ -36,11 +36,13 @@ import (
|
||||
func TestBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 1
|
||||
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
|
||||
ds := util.SlotAtEpoch(t, cfg.DenebForkEpoch)
|
||||
|
||||
db := testDB.SetupDB(t)
|
||||
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
|
||||
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es, 4)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
bs := filesystem.NewEphemeralBlobStorage(t)
|
||||
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
|
||||
@@ -170,7 +172,7 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -194,7 +196,7 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot not found", func(t *testing.T) {
|
||||
u := "http://foo.example/122"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es-1)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -211,7 +213,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
})
|
||||
t.Run("one blob only", func(t *testing.T) {
|
||||
u := "http://foo.example/123?indices=2"
|
||||
u := fmt.Sprintf("http://foo.example/%d?indices=2", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -242,7 +244,7 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("no blobs returns an empty array", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -266,8 +268,8 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("blob index over max", func(t *testing.T) {
|
||||
overLimit := maxBlobsPerBlockByVersion(version.Deneb)
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlock(ds)
|
||||
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -281,7 +283,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
|
||||
})
|
||||
t.Run("outside retention period returns 200 with what we have", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -305,13 +307,13 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es+128, 0)
|
||||
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(commitments), 0)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
|
||||
u := "http://foo.example/333"
|
||||
u := fmt.Sprintf("http://foo.example/%d", es+128)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
@@ -423,16 +425,17 @@ func TestBlobs(t *testing.T) {
func TestBlobs_Electra(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 0
cfg.ElectraForkEpoch = 1
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
cfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 6},
{Epoch: 1, MaxBlobsPerBlock: 9},
{Epoch: cfg.FuluForkEpoch + 4096, MaxBlobsPerBlock: 6},
{Epoch: cfg.FuluForkEpoch + 4096 + 128, MaxBlobsPerBlock: 9},
}
params.OverrideBeaconConfig(cfg)

es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
db := testDB.SetupDB(t)
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, maxBlobsPerBlockByVersion(version.Electra))
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, es, overLimit)
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
bs := filesystem.NewEphemeralBlobStorage(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
@@ -450,7 +453,7 @@ func TestBlobs_Electra(t *testing.T) {
TimeFetcher: mockChainService,
}
t.Run("max blobs for electra", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -468,7 +471,7 @@ func TestBlobs_Electra(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.SidecarsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
require.Equal(t, overLimit, len(resp.Data))
sidecar := resp.Data[0]
require.NotNil(t, sidecar)
assert.Equal(t, "0", sidecar.Index)
@@ -481,8 +484,8 @@ func TestBlobs_Electra(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("requested blob index at max", func(t *testing.T) {
limit := maxBlobsPerBlockByVersion(version.Electra) - 1
u := fmt.Sprintf("http://foo.example/123?indices=%d", limit)
limit := params.BeaconConfig().MaxBlobsPerBlock(es) - 1
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, limit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -513,8 +516,8 @@ func TestBlobs_Electra(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("blob index over max", func(t *testing.T) {
overLimit := maxBlobsPerBlockByVersion(version.Electra)
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -530,6 +533,7 @@ func TestBlobs_Electra(t *testing.T) {
}

func Test_parseIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
tests := []struct {
name string
query string
@@ -559,7 +563,7 @@ func Test_parseIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseIndices(&url.URL{RawQuery: tt.query}, 0)
got, err := parseIndices(&url.URL{RawQuery: tt.query}, ds)
if err != nil && tt.wantErr != "" {
require.StringContains(t, tt.wantErr, err.Error())
return
@@ -588,6 +592,7 @@ func TestGetBlobs(t *testing.T) {
{Epoch: 20, MaxBlobsPerBlock: 12}, // Fulu
}
params.OverrideBeaconConfig(cfg)
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)

db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
@@ -1009,7 +1014,8 @@ func TestGetBlobs(t *testing.T) {

// Test for Electra fork
t.Run("electra max blobs", func(t *testing.T) {
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, maxBlobsPerBlockByVersion(version.Electra))
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, overLimit)
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
electraBs := filesystem.NewEphemeralBlobStorage(t)
electraSidecars := verification.FakeVerifySliceForTest(t, electraBlobs)
@@ -1036,7 +1042,8 @@ func TestGetBlobs(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetBlobsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))

require.Equal(t, overLimit, len(resp.Data))
blob := resp.Data[0]
require.NotNil(t, blob)
assert.Equal(t, hexutil.Encode(electraBlobs[0].Blob), blob)
@@ -1145,14 +1152,3 @@ func unmarshalBlobs(t *testing.T, response []byte) [][]byte {
}
return blobs
}

func maxBlobsPerBlockByVersion(v int) int {
if v >= version.Fulu {
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu
}
if v >= version.Electra {
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra
}

return params.BeaconConfig().DeprecatedMaxBlobsPerBlock
}

@@ -167,8 +167,8 @@ func prepareConfigSpec() (map[string]interface{}, error) {

for i := 0; i < t.NumField(); i++ {
tField := t.Field(i)
_, isSpec := tField.Tag.Lookup("spec")
if !isSpec {
specTag, isSpec := tField.Tag.Lookup("spec")
if !isSpec || specTag != "true" {
continue
}
if shouldSkip(tField) {

@@ -145,7 +145,6 @@ func TestGetSpec(t *testing.T) {
config.PendingDepositsLimit = 82
config.MaxPendingPartialsPerWithdrawalsSweep = 83
config.PendingConsolidationsLimit = 84
config.MaxPartialWithdrawalsPerPayload = 85
config.FullExitRequestAmount = 86
config.MaxConsolidationsRequestsPerPayload = 87
config.MaxAttesterSlashingsElectra = 88
@@ -164,6 +163,7 @@ func TestGetSpec(t *testing.T) {
config.KzgCommitmentInclusionProofDepth = 101
config.BlobsidecarSubnetCount = 102
config.BlobsidecarSubnetCountElectra = 103
config.SyncMessageDueBPS = 104

var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -201,8 +201,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)

assert.Equal(t, 176, len(data))
assert.Equal(t, 171, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -240,8 +239,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "14", v)
case "RANDOM_SUBNETS_PER_VALIDATOR":
assert.Equal(t, "15", v)
case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
assert.Equal(t, "16", v)
case "SECONDS_PER_ETH1_BLOCK":
assert.Equal(t, "17", v)
case "DEPOSIT_CHAIN_ID":
@@ -438,8 +435,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "16777216", v)
case "PROPOSER_SCORE_BOOST":
assert.Equal(t, "40", v)
case "INTERVALS_PER_SLOT":
assert.Equal(t, "3", v)
case "MAX_WITHDRAWALS_PER_PAYLOAD":
assert.Equal(t, "74", v)
case "MAX_BLS_TO_EXECUTION_CHANGES":
@@ -456,9 +451,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "8", v)
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
assert.Equal(t, "128", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
case "NODE_ID_BITS":
assert.Equal(t, "256", v)
case "ATTESTATION_SUBNET_EXTRA_BITS":
assert.Equal(t, "0", v)
case "ATTESTATION_SUBNET_PREFIX_BITS":
@@ -521,8 +513,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "83", v)
case "PENDING_CONSOLIDATIONS_LIMIT":
assert.Equal(t, "84", v)
case "MAX_PARTIAL_WITHDRAWALS_PER_PAYLOAD":
assert.Equal(t, "85", v)
case "FULL_EXIT_REQUEST_AMOUNT":
assert.Equal(t, "86", v)
case "MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD":
@@ -541,8 +531,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "93", v)
case "MAX_PENDING_DEPOSITS_PER_EPOCH":
assert.Equal(t, "94", v)
case "TARGET_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "6", v)
case "MAX_BLOBS_PER_BLOCK_ELECTRA":
assert.Equal(t, "9", v)
case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
@@ -573,12 +561,12 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "100", v)
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "101", v)
case "MAX_BLOBS_PER_BLOCK_FULU":
assert.Equal(t, "12", v)
case "BLOB_SIDECAR_SUBNET_COUNT":
assert.Equal(t, "102", v)
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
assert.Equal(t, "103", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "104", v)
case "BLOB_SCHEDULE":
// BLOB_SCHEDULE should be an empty slice when no schedule is defined
blobSchedule, ok := v.([]interface{})

@@ -3,7 +3,6 @@ package lookup
import (
"context"
"fmt"
"math"
"strconv"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
@@ -284,14 +283,9 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.
return make([]*blocks.VerifiedROBlob, 0), nil
}

// Compute the first Fulu slot.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
fuluForkSlot := primitives.Slot(math.MaxUint64)
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
fuluForkSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}

// Convert versioned hashes to indices if provided

@@ -190,7 +190,7 @@ func TestBlobsErrorHandling(t *testing.T) {

t.Run("non-existent block by slot returns 404", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{},
}

@@ -275,39 +275,19 @@ func TestBlobsErrorHandling(t *testing.T) {
}

func TestGetBlob(t *testing.T) {
const (
slot = 123
blobCount = 4
denebForEpoch = 1
fuluForkEpoch = 2
)

setupDeneb := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForEpoch
params.OverrideBeaconConfig(cfg)
}

setupFulu := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForEpoch
cfg.FuluForkEpoch = fuluForkEpoch
params.OverrideBeaconConfig(cfg)
}

const blobCount = 4
ctx := t.Context()
db := testDB.SetupDB(t)
params.SetupTestConfigCleanup(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().DenebForkEpoch + 4096*2

// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
db := testDB.SetupDB(t)
require.NoError(t, kzg.Start())

// Create and save Deneb block and blob sidecars.
_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)

denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, ds, blobCount, util.WithDenebSlot(ds))
denebBlockRoot := denebBlock.Root()

verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
@@ -316,13 +296,14 @@ func TestGetBlob(t *testing.T) {
require.NoError(t, err)
}

err = db.SaveBlock(t.Context(), denebBlock)
err := db.SaveBlock(t.Context(), denebBlock)
require.NoError(t, err)

// Create Electra block and blob sidecars. (Electra block = Fulu block),
// save the block, convert blob sidecars to data column sidecars and save the block.
fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
dsStr := fmt.Sprintf("%d", ds)
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, blobCount)
fuluBlockRoot := fuluBlock.Root()

cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
@@ -347,8 +328,6 @@ func TestGetBlob(t *testing.T) {
require.NoError(t, err)

t.Run("genesis", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{}
_, rpcErr := blocker.Blobs(ctx, "genesis")
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
@@ -356,8 +335,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("head", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{
Root: denebBlockRoot[:],
@@ -388,8 +365,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("finalized", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -405,8 +380,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("justified", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -422,8 +395,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("root", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -438,8 +409,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("slot", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -449,7 +418,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}

verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedBlobs))
})
@@ -457,8 +426,6 @@ func TestGetBlob(t *testing.T) {
t.Run("one blob only", func(t *testing.T) {
const index = 2

setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -468,7 +435,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}

retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{index}))
require.IsNil(t, rpcErr)
require.Equal(t, 1, len(retrievedVerifiedSidecars))

@@ -483,8 +450,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("no blobs returns an empty array", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -494,14 +459,12 @@ func TestGetBlob(t *testing.T) {
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
}

verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(verifiedBlobs))
})

t.Run("no blob at index", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -512,14 +475,12 @@ func TestGetBlob(t *testing.T) {
}

noBlobIndex := len(storedBlobSidecars) + 1
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, noBlobIndex}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, noBlobIndex}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})

t.Run("index too big", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -528,14 +489,12 @@ func TestGetBlob(t *testing.T) {
BeaconDB: db,
BlobStorage: blobStorage,
}
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, math.MaxInt}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, math.MaxInt}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
})

t.Run("not enough stored data column sidecars", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
require.NoError(t, err)
@@ -555,8 +514,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("reconstruction needed", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
require.NoError(t, err)
@@ -582,8 +539,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("no reconstruction needed", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -143,7 +142,7 @@ func (vs *Server) deposits(
if err != nil {
return nil, errors.Wrap(err, "could not retrieve requests start index")
}
eth1DepositIndexLimit := math.Min(canonicalEth1Data.DepositCount, requestsStartIndex)
eth1DepositIndexLimit := min(canonicalEth1Data.DepositCount, requestsStartIndex)
if beaconState.Eth1DepositIndex() < eth1DepositIndexLimit {
if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < eth1DepositIndexLimit {
pendingDeps = append(pendingDeps, dep)

@@ -74,6 +74,18 @@ func WithTimeAsNow(t time.Time) ClockOpt {
}
}

func WithSlotAsNow(s types.Slot) ClockOpt {
return func(g *Clock) {
g.now = func() time.Time {
t, err := slots.StartTime(g.t, s)
if err != nil {
panic(err) // lint:nopanic -- This is a programming error if genesis/slot are invalid.
}
return t
}
}
}

// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.

@@ -160,7 +160,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
}

validatorsLen := b.validatorsLen()
bound := mathutil.Min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
for i := uint64(0); i < bound; i++ {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {

@@ -3,7 +3,6 @@ package stategen
import (
"context"
"fmt"
"math"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -54,7 +53,7 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
defer span.End()

// Duration can't be 0 to prevent panic for division.
duration := uint64(math.Max(float64(s.saveHotStateDB.duration), 1))
duration := uint64(max(float64(s.saveHotStateDB.duration), 1))

s.saveHotStateDB.lock.Lock()
if s.saveHotStateDB.enabled && st.Slot().Mod(duration) == 0 {

@@ -17,6 +17,7 @@ go_library(
"fuzz_exports.go", # keep
"log.go",
"metrics.go",
"once.go",
"options.go",
"pending_attestations_queue.go",
"pending_blocks_queue.go",
@@ -172,6 +173,8 @@ go_test(
"error_test.go",
"fork_watcher_test.go",
"kzg_batch_verifier_test.go",
"once_test.go",
"pending_attestations_queue_bucket_test.go",
"pending_attestations_queue_test.go",
"pending_blocks_queue_test.go",
"rate_limiter_test.go",

@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -24,8 +25,9 @@ func testBlobGen(t *testing.T, start primitives.Slot, n int) ([]blocks.ROBlock,
}

func TestValidateNext_happy(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 4)
startSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := startSlot + 65
blks, blobs := testBlobGen(t, startSlot, 4)
cfg := &blobSyncConfig{
retentionStart: 0,
nbv: testNewBlobVerifier(),
@@ -74,8 +76,9 @@ func TestValidateNext_sigMatch(t *testing.T) {
}

func TestValidateNext_errorsFromVerifier(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 1)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := primitives.Slot(ds + 96)
blks, blobs := testBlobGen(t, ds+31, 1)
cases := []struct {
name string
err error

@@ -2,6 +2,7 @@ package sync

import (
"encoding/binary"
"io"
"math"
"math/big"
"testing"
@@ -18,9 +19,11 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -165,23 +168,12 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
require.Equal(t, rob.Index, r.sidecar.Index)
}

func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func()) {
cfg := params.BeaconConfig()
copiedCfg := cfg.Copy()
repositionFutureEpochs(copiedCfg)
copiedCfg.InitializeForkSchedule()
params.OverrideBeaconConfig(copiedCfg)
cleanup := func() {
params.OverrideBeaconConfig(cfg)
}
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
chain, clock := defaultMockChain(t)
func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob) {
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch))
chain := defaultMockChain(t, c.clock.CurrentEpoch())
if c.chain == nil {
c.chain = chain
}
if c.clock == nil {
c.clock = clock
}
d := db.SetupDB(t)

sidecars := make([]blocks.ROBlob, 0)
@@ -208,16 +200,16 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())

client := p2ptest.NewTestP2P(t)
s := &Service{
cfg: &config{p2p: client, chain: c.chain, clock: clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
cfg: &config{p2p: client, chain: c.chain, clock: c.clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
rateLimiter: newRateLimiter(client),
}

byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV1, leakybucket.NewCollector(0.000001, int64(byRootRate), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV1, leakybucket.NewCollector(0.000001, int64(byRangeRate), time.Second, false))

return s, sidecars, cleanup
return s, sidecars
}

func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChunk) func(network.Stream) {
@@ -225,12 +217,16 @@ func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChu
for _, ex := range expect {
ex.requireExpected(t, s, stream)
}

encoding := s.cfg.p2p.Encoding()
_, _, err := ReadStatusCode(stream, encoding)
require.ErrorIs(t, err, io.EOF)
}
}

func (c *blobsTestCase) run(t *testing.T) {
s, sidecars, cleanup := c.setup(t)
defer cleanup()
blobRpcThrottleInterval = time.Microsecond * 1
s, sidecars := c.setup(t)
req := c.requestFromSidecars(sidecars)
expect := c.defineExpected(t, sidecars, req)
m := map[types.Slot][]blocks.ROBlob{}
@@ -266,41 +262,32 @@ func (c *blobsTestCase) run(t *testing.T) {
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
if cfg.FuluForkEpoch == math.MaxUint64 {
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
}
}

func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
de := params.BeaconConfig().DenebForkEpoch
df, err := params.Fork(de)
func defaultMockChain(t *testing.T, current primitives.Epoch) *mock.ChainService {
fe := current - 2
df, err := params.Fork(current)
require.NoError(t, err)
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
fe := ce - 2
cs, err := slots.EpochStart(ce)
require.NoError(t, err)
genesis := time.Now()
mockNow := startup.MockNower{}
clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
mockNow.SetSlot(t, clock, cs)
chain := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
Fork: df,
}

return chain, clock
return chain
}

func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
ctx := t.Context()
nblocks := 10
c := &blobsTestCase{nblocks: nblocks}
c := &blobsTestCase{nblocks: nblocks, clock: startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(ds))}
c.oldestSlot = c.defaultOldestSlotByRoot
s, sidecars, cleanup := c.setup(t)
s, sidecars := c.setup(t)
req := blobRootRequestFromSidecars(sidecars)
expect := c.filterExpectedByRoot(t, sidecars, req)
defer cleanup()
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlock(0)
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
require.Equal(t, maxed, len(sidecars))
require.Equal(t, maxed, len(expect))
for _, sc := range sidecars {

@@ -1,6 +1,7 @@
package sync

import (
"context"
"strings"
"time"

@@ -29,13 +30,13 @@ func (s *Service) updateCustodyInfoIfNeeded() error {
const minimumPeerCount = 1

// Get our actual custody group count.
actualCustodyGrounpCount, err := s.cfg.p2p.CustodyGroupCount()
actualCustodyGrounpCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "p2p custody group count")
}

// Get our target custody group count.
targetCustodyGroupCount, err := s.custodyGroupCount()
targetCustodyGroupCount, err := s.custodyGroupCount(s.ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}
@@ -88,7 +89,7 @@ func (s *Service) updateCustodyInfoIfNeeded() error {

// custodyGroupCount computes the custody group count based on the custody requirement,
// the validators custody requirement, and whether the node is subscribed to all data subnets.
func (s *Service) custodyGroupCount() (uint64, error) {
func (s *Service) custodyGroupCount(context.Context) (uint64, error) {
beaconConfig := params.BeaconConfig()

if flags.Get().SubscribeAllDataSubnets {

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
@@ -55,9 +54,9 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {

if withChain {
const headSlot = primitives.Slot(100)
block, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Body: &eth.BeaconBlockBody{},
block, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{},
Slot: headSlot,
},
})
@@ -90,11 +89,13 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
}

func (ts *testSetup) assertCustodyInfo(t *testing.T, expectedSlot primitives.Slot, expectedCount uint64) {
p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot()
ctx := t.Context()

p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot(ctx)
require.NoError(t, err)
require.Equal(t, expectedSlot, p2pEarliestSlot)

p2pCustodyCount, err := ts.p2pService.CustodyGroupCount()
p2pCustodyCount, err := ts.p2pService.CustodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, expectedCount, p2pCustodyCount)

@@ -170,13 +171,15 @@ func TestCustodyGroupCount(t *testing.T) {
config.CustodyRequirement = 3
params.OverrideBeaconConfig(config)

ctx := t.Context()

t.Run("SubscribeAllDataSubnets enabled returns NumberOfCustodyGroups", func(t *testing.T) {
withSubscribeAllDataSubnets(t, func() {
service := &Service{
ctx: context.Background(),
}

result, err := service.custodyGroupCount()
result, err := service.custodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, config.NumberOfCustodyGroups, result)
})
@@ -188,7 +191,7 @@ func TestCustodyGroupCount(t *testing.T) {
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}

result, err := service.custodyGroupCount()
result, err := service.custodyGroupCount(ctx)
require.NoError(t, err)
require.Equal(t, config.CustodyRequirement, result)
})

@@ -20,7 +20,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
goPeer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
@@ -921,7 +920,7 @@ func buildByRangeRequests(
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
for root, indices := range indicesByRoot {
identifier := &eth.DataColumnsByRootIdentifier{
identifier := &ethpb.DataColumnsByRootIdentifier{
BlockRoot: root[:],
Columns: helpers.SortedSliceFromMap(indices),
}
@@ -929,7 +928,7 @@ func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint6
}

// Sort identifiers to have a deterministic output.
slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
slices.SortFunc(identifiers, func(left, right *ethpb.DataColumnsByRootIdentifier) int {
if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
return cmp
}
@@ -1023,17 +1022,20 @@ func computeIndicesByRootByPeer(
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
for peer := range peers {
log := log.WithField("peerID", peer)

// Computes the custody columns for each peer
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
if err != nil {
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
log.WithError(err).Debug("Failed to convert peer ID to node ID")
continue
}

custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)

dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
log.WithError(err).Debug("Failed to get peer DAS info")
continue
}

for column := range dasInfo.CustodyColumns {
@@ -1046,11 +1048,13 @@ func computeIndicesByRootByPeer(
// Compute the head slot for each peer
peerChainState, err := p2p.Peers().ChainState(peer)
if err != nil {
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
log.WithError(err).Debug("Failed to get peer chain state")
continue
}

if peerChainState == nil {
return nil, errors.Errorf("chain state is nil for peer %s", peer)
log.Debug("Peer chain state is nil")
continue
}

// Our view of the head slot of a peer is not updated in real time.

@@ -23,7 +23,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -144,7 +143,7 @@ func TestFetchDataColumnSidecars(t *testing.T) {
HeadSlot: 8,
})

p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&pb.MetaDataV2{
p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&ethpb.MetaDataV2{
CustodyGroupCount: 128,
}))


@@ -117,6 +117,7 @@ func TestService_decodePubsubMessage(t *testing.T) {

func TestExtractDataType(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()

type args struct {
@@ -304,6 +305,9 @@ func TestExtractDataType(t *testing.T) {
}

func TestExtractDataTypeFromTypeMapInvalid(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}}
_, err := extractDataTypeFromTypeMap(types.BlockMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)

@@ -2,7 +2,6 @@ package sync

import (
"bytes"
"errors"
"io"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -12,6 +11,7 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
multiplex "github.com/libp2p/go-mplex"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -38,7 +38,7 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
b := make([]byte, 1)
_, err := stream.Read(b)
if err != nil {
return 0, "", err
return 0, "", errors.Wrap(err, "stream read")
}

if b[0] == responseCodeSuccess {
@@ -52,7 +52,7 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
SetStreamReadDeadline(stream, params.BeaconConfig().RespTimeoutDuration())
msg := &types.ErrorMessage{}
if err := encoding.DecodeWithMaxLength(stream, msg); err != nil {
return 0, "", err
return 0, "", errors.Wrap(err, "decode error message")
}

return b[0], string(*msg), nil

@@ -9,23 +9,28 @@ import (
"github.com/pkg/errors"
)

// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
<-s.initialSyncComplete
// p2pHandlerControlLoop runs in a continuous loop to ensure that:
// - We are subscribed to the correct gossipsub topics (for the current and upcoming epoch).
// - We have registered the correct RPC stream handlers (for the current and upcoming epoch).
// - We have cleaned up gossipsub topics and RPC stream handlers that are no longer needed.
func (s *Service) p2pHandlerControlLoop() {
// At startup, launch registration and peer discovery loops, and register rpc stream handlers.
startEntry := params.GetNetworkScheduleEntry(s.cfg.clock.CurrentEpoch())
s.registerSubscribers(startEntry)

slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
// In the event of a node restart, we will still end up subscribing to the correct
// topics during/after the fork epoch. This routine is to ensure correct
// subscriptions for nodes running before a fork epoch.
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if err := s.registerForUpcomingFork(currEpoch); err != nil {
case <-slotTicker.C():
current := s.cfg.clock.CurrentEpoch()
if err := s.ensureRegistrationsForEpoch(current); err != nil {
log.WithError(err).Error("Unable to check for fork in the next epoch")
continue
}
if err := s.deregisterFromPastFork(currEpoch); err != nil {
if err := s.ensureDeregistrationForEpoch(current); err != nil {
log.WithError(err).Error("Unable to check for fork in the previous epoch")
continue
}
@@ -37,102 +42,90 @@ func (s *Service) forkWatcher() {
}
}

// registerForUpcomingFork registers appropriate gossip and RPC topic if there is a fork in the next epoch.
func (s *Service) registerForUpcomingFork(currentEpoch primitives.Epoch) error {
nextEntry := params.GetNetworkScheduleEntry(currentEpoch + 1)
// Check if there is a fork in the next epoch.
if nextEntry.ForkDigest == s.registeredNetworkEntry.ForkDigest {
return nil
}
// ensureRegistrationsForEpoch ensures that gossip topic and RPC stream handler
// registrations are in place for the current and subsequent epoch.
func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
current := params.GetNetworkScheduleEntry(epoch)
s.registerSubscribers(current)

if s.subHandler.digestExists(nextEntry.ForkDigest) {
return nil
}

// Register the subscribers (gossipsub) for the next epoch.
s.registerSubscribers(nextEntry.Epoch, nextEntry.ForkDigest)

// Get the handlers for the current and next fork.
currentHandler, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
currentHandler, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
if !s.digestActionDone(current.ForkDigest, registerRpcOnce) {
for topic, handler := range currentHandler {
s.registerRPC(topic, handler)
}
}

nextHandler, err := s.rpcHandlerByTopicFromEpoch(nextEntry.Epoch)
next := params.GetNetworkScheduleEntry(epoch + 1)
if current.Epoch == next.Epoch {
return nil // no fork in the next epoch
}
s.registerSubscribers(next)

if s.digestActionDone(next.ForkDigest, registerRpcOnce) {
return nil
}

nextHandler, err := s.rpcHandlerByTopicFromFork(next.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}

// Compute newly added topics.
newHandlersByTopic := addedRPCHandlerByTopic(currentHandler, nextHandler)

// Register the new RPC handlers.
// We deregister the old topics later, at least one epoch after the fork.
for topic, handler := range newHandlersByTopic {
s.registerRPC(topic, handler)
}

s.registeredNetworkEntry = nextEntry
return nil
}

// deregisterFromPastFork deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
// Get the fork.
currentFork, err := params.Fork(currentEpoch)
if err != nil {
return errors.Wrap(err, "genesis validators root")
}
// ensureDeregistrationForEpoch deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
func (s *Service) ensureDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
current := params.GetNetworkScheduleEntry(currentEpoch)

// If we are still in our genesis fork version then exit early.
if currentFork.Epoch == params.BeaconConfig().GenesisEpoch {
if current.Epoch == params.BeaconConfig().GenesisEpoch {
return nil
}
if currentEpoch < current.Epoch+1 {
return nil // wait until we are 1 epoch into the fork
}

// Get the epoch after the fork epoch.
afterForkEpoch := currentFork.Epoch + 1
previous := params.GetNetworkScheduleEntry(current.Epoch - 1)
// Remove stream handlers for all topics that are in the set of
// currentTopics-previousTopics
if !s.digestActionDone(previous.ForkDigest, unregisterRpcOnce) {
previousTopics, err := s.rpcHandlerByTopicFromFork(previous.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
currentTopics, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}
topicsToRemove := removedRPCTopics(previousTopics, currentTopics)
for topic := range topicsToRemove {
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullTopic))
log.WithField("topic", fullTopic).Debug("Removed RPC handler")
}
}

// Start de-registering if the current epoch is after the fork epoch.
if currentEpoch != afterForkEpoch {
// Unsubscribe from all gossip topics with the previous fork digest.
if s.digestActionDone(previous.ForkDigest, unregisterGossipOnce) {
return nil
}

// Look at the previous fork's digest.
beforeForkEpoch := currentFork.Epoch - 1

beforeForkDigest := params.ForkDigest(beforeForkEpoch)

// Exit early if there are no topics with that particular digest.
if !s.subHandler.digestExists(beforeForkDigest) {
return nil
}

// Compute the RPC handlers that are no longer needed.
beforeForkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(beforeForkEpoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}

forkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(currentFork.Epoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}

topicsToRemove := removedRPCTopics(beforeForkHandlerByTopic, forkHandlerByTopic)
for topic := range topicsToRemove {
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullTopic))
log.WithField("topic", fullTopic).Debug("Removed RPC handler")
}

// Run through all our current active topics and see
// if there are any subscriptions to be removed.
for _, t := range s.subHandler.allTopics() {
retDigest, err := p2p.ExtractGossipDigest(t)
if err != nil {
log.WithError(err).Error("Could not retrieve digest")
continue
}
if retDigest == beforeForkDigest {
if retDigest == previous.ForkDigest {
s.unSubscribeFromTopic(t)
}
}

@@ -50,12 +50,36 @@ func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
return r
}

func TestRegisterSubscriptions_Idempotent(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
fulu := params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().FuluForkEpoch = fulu
params.BeaconConfig().InitializeForkSchedule()

current := fulu - 1
s := testForkWatcherService(t, current)
next := params.GetNetworkScheduleEntry(fulu)
wg := attachSpawner(s)
require.Equal(t, true, s.registerSubscribers(next))
done := make(chan struct{})
go func() { wg.Wait(); close(done) }()
select {
case <-time.After(5 * time.Second):
t.Fatal("timed out waiting for subscriptions to be registered")
case <-done:
}
// the goal of this callback is just to assert that spawn is never called.
s.subscriptionSpawner = func(func()) { t.Error("registration routines spawned twice for the same digest") }
require.NoError(t, s.ensureRegistrationsForEpoch(fulu))
}

func TestService_CheckForNextEpochFork(t *testing.T) {
closedChan := make(chan struct{})
close(closedChan)
params.SetupTestConfigCleanup(t)
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 1096*2
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.BeaconConfig().InitializeForkSchedule()

tests := []struct {
@@ -171,7 +195,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
current := tt.epochAtRegistration(tt.forkEpoch)
s := testForkWatcherService(t, current)
wg := attachSpawner(s)
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
wg.Wait()
tt.checkRegistration(t, s)

@@ -193,10 +217,13 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
// Move the clock to just before the next fork epoch and ensure deregistration is correct
wg = attachSpawner(s)
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
wg.Wait()

require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch))
assert.Equal(t, true, s.subHandler.digestExists(digest))
// deregister as if it is the epoch after the next fork epoch
require.NoError(t, s.deregisterFromPastFork(tt.nextForkEpoch+1))
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch+1))
assert.Equal(t, false, s.subHandler.digestExists(digest))
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
})

@@ -384,7 +384,7 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
}

// Compute the columns to request.
custodyGroupCount, err := f.p2p.CustodyGroupCount()
custodyGroupCount, err := f.p2p.CustodyGroupCount(ctx)
if err != nil {
return blobsPid, errors.Wrap(err, "custody group count")
}

@@ -9,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
mathutil "github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -131,8 +130,8 @@ func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
// Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
// Limit cannot be less that minimum peers required by sync mechanism.
limit = mathutil.Max(limit, uint64(required))
limit = max(limit, uint64(required))
// Limit cannot be higher than number of peers available (safe-guard).
limit = mathutil.Min(limit, uint64(len(peers)))
limit = min(limit, uint64(len(peers)))
return peers[:limit]
}

@@ -1017,13 +1017,13 @@ func TestBlobRangeForBlocks(t *testing.T) {
for i := range blks {
sbbs[i] = blks[i]
}
retentionStart := primitives.Slot(5)
retentionStart := blks[len(blks)/2].Block().Slot()
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
bounds := countCommitments(bwb, retentionStart).blobRange(nil)
require.Equal(t, retentionStart, bounds.low)
higher := primitives.Slot(len(blks) + 1)
bounds = countCommitments(bwb, higher).blobRange(nil)
maxBlkSlot := blks[len(blks)-1].Block().Slot()
bounds = countCommitments(bwb, maxBlkSlot+1).blobRange(nil)
var nilBounds *blobRange
require.Equal(t, nilBounds, bounds)

@@ -1054,17 +1054,17 @@ func TestBlobRequest(t *testing.T) {
}
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
maxBlkSlot := primitives.Slot(len(blks) - 1)

tooHigh := primitives.Slot(len(blks) + 1)
maxBlkSlot := blks[len(blks)-1].Block().Slot()
tooHigh := maxBlkSlot + 1
req = countCommitments(bwb, tooHigh).blobRange(nil).Request()
require.Equal(t, nilReq, req)

req = countCommitments(bwb, maxBlkSlot).blobRange(nil).Request()
require.Equal(t, uint64(1), req.Count)
require.Equal(t, maxBlkSlot, req.StartSlot)
require.Equal(t, uint64(1), req.Count)

halfway := primitives.Slot(5)
halfway := blks[len(blks)/2].Block().Slot()
req = countCommitments(bwb, halfway).blobRange(nil).Request()
require.Equal(t, halfway, req.StartSlot)
// adding 1 to include the halfway slot itself
@@ -1103,6 +1103,12 @@ func TestCountCommitments(t *testing.T) {
}

func TestCommitmentCountList(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
ds := util.SlotAtEpoch(t, de)
denebRel := func(s primitives.Slot) primitives.Slot {
return ds + s
}
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
cc commitmentCountList
@@ -1119,20 +1125,20 @@ func TestCommitmentCountList(t *testing.T) {
{
name: "nil bss, single slot",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: denebRel(11235), count: 1},
},
expected: &blobRange{low: 11235, high: 11235},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 1},
expected: &blobRange{low: denebRel(11235), high: denebRel(11235)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 1},
},
{
name: "nil bss, sparse slots",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: 11240, count: params.BeaconConfig().MaxBlobsPerBlock(0)},
{slot: 11250, count: 3},
{slot: denebRel(11235), count: 1},
{slot: denebRel(11240), count: maxBlobs},
{slot: denebRel(11250), count: 3},
},
expected: &blobRange{low: 11235, high: 11250},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 16},
expected: &blobRange{low: denebRel(11235), high: denebRel(11250)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 16},
},
{
name: "AllAvailable in middle, some avail low, none high",
@@ -1141,15 +1147,15 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("1"))},
{slot: 15, count: 3},
{slot: denebRel(0), count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: maxBlobs, root: bytesutil.ToBytes32([]byte("1"))},
{slot: denebRel(15), count: 3},
},
expected: &blobRange{low: 0, high: 15},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 0, Count: 16},
expected: &blobRange{low: denebRel(0), high: denebRel(15)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(0), Count: 16},
},
{
name: "AllAvailable at high and low",
@@ -1158,15 +1164,15 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 5},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 1},
expected: &blobRange{low: denebRel(5), high: denebRel(5)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 1},
},
{
name: "AllAvailable at high and low, adjacent range in middle",
@@ -1175,16 +1181,16 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 6, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3},
{slot: denebRel(6), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 6},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 2},
|
||||
expected: &blobRange{low: denebRel(5), high: denebRel(6)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 2},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low, range in middle",
|
||||
@@ -1194,16 +1200,16 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
bytesutil.ToBytes32([]byte("1")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: 10, count: 3},
|
||||
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
|
||||
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: denebRel(5), count: 3, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: denebRel(10), count: 3},
|
||||
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 10},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 6},
|
||||
expected: &blobRange{low: denebRel(5), high: denebRel(10)},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 6},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
@@ -1218,8 +1224,8 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
require.IsNil(t, br.Request())
|
||||
} else {
|
||||
req := br.Request()
|
||||
require.DeepEqual(t, req.StartSlot, c.request.StartSlot)
|
||||
require.DeepEqual(t, req.Count, c.request.Count)
|
||||
require.Equal(t, req.StartSlot, c.request.StartSlot)
|
||||
require.Equal(t, req.Count, c.request.Count)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1299,7 +1305,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
r1: {0, 1},
|
||||
r7: {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, params.BeaconConfig().DenebForkEpoch, onDisk)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(bwb[i1].Blobs))
|
||||
|
||||
@@ -413,7 +413,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay ti
|
||||
}
|
||||
|
||||
// Compute the indices we need to custody.
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
@@ -439,6 +439,7 @@ func TestService_Synced(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMissingBlobRequest(t *testing.T) {
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
cases := []struct {
|
||||
name string
|
||||
setup func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage)
|
||||
@@ -476,7 +477,7 @@ func TestMissingBlobRequest(t *testing.T) {
|
||||
{
|
||||
name: "2 commitments, 1 missing",
|
||||
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
|
||||
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 1))
|
||||
return bk, fs
|
||||
@@ -486,7 +487,7 @@ func TestMissingBlobRequest(t *testing.T) {
|
||||
{
|
||||
name: "2 commitments, 0 missing",
|
||||
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
|
||||
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
|
||||
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 0, 1))
|
||||
return bk, fs
|
||||
@@ -629,7 +630,7 @@ func TestFetchOriginSidecars(t *testing.T) {
|
||||
|
||||
// Compute the columns to request.
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
custodyGroupCount, err := p2p.CustodyGroupCount()
|
||||
custodyGroupCount, err := p2p.CustodyGroupCount(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
|
||||
40
beacon-chain/sync/once.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package sync
|
||||
|
||||
import "sync"
|
||||
|
||||
// oncePerDigest represents an action that should only be performed once per fork digest.
|
||||
type oncePerDigest uint8
|
||||
|
||||
const (
|
||||
doneZero oncePerDigest = 0
|
||||
registerGossipOnce oncePerDigest = 1 << 0
|
||||
unregisterGossipOnce oncePerDigest = 1 << 1
|
||||
registerRpcOnce oncePerDigest = 1 << 2
|
||||
unregisterRpcOnce oncePerDigest = 1 << 3
|
||||
)
|
||||
|
||||
// perDigestSet keeps track of which oncePerDigest actions
|
||||
// have been performed for each fork digest.
|
||||
type perDigestSet struct {
|
||||
sync.Mutex
|
||||
history map[[4]byte]oncePerDigest
|
||||
}
|
||||
|
||||
// digestActionDone marks the action as done for the given digest, returning true if it was already done.
|
||||
func (s *Service) digestActionDone(digest [4]byte, action oncePerDigest) bool {
|
||||
s.digestActions.Lock()
|
||||
defer s.digestActions.Unlock()
|
||||
// Lazily initialize the history map; the embedded mutex is not a reference type, so it needs no initialization.
|
||||
if s.digestActions.history == nil {
|
||||
s.digestActions.history = make(map[[4]byte]oncePerDigest)
|
||||
}
|
||||
|
||||
prev := s.digestActions.history[digest]
|
||||
// Return true if the bit was already set
|
||||
if prev&action != 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
s.digestActions.history[digest] = prev | action
|
||||
return false
|
||||
}
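Because each action is a distinct bit, one map entry per fork digest can track several independent once-only actions. A self-contained sketch of the same bit-flag pattern outside the Service type (names and flags are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type action uint8

const (
	registerGossip   action = 1 << 0
	unregisterGossip action = 1 << 1
)

type onceSet struct {
	sync.Mutex
	done map[[4]byte]action
}

// markDone records the action for a digest and reports whether it had already been recorded.
func (o *onceSet) markDone(digest [4]byte, a action) bool {
	o.Lock()
	defer o.Unlock()
	if o.done == nil {
		o.done = make(map[[4]byte]action)
	}
	prev := o.done[digest]
	if prev&a != 0 {
		return true
	}
	o.done[digest] = prev | a
	return false
}

func main() {
	var s onceSet
	d := [4]byte{1, 2, 3, 4}
	fmt.Println(s.markDone(d, registerGossip))   // false: first time
	fmt.Println(s.markDone(d, registerGossip))   // true: already done
	fmt.Println(s.markDone(d, unregisterGossip)) // false: different action, same digest
}
```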
|
||||
40
beacon-chain/sync/once_test.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDigestActionDone(t *testing.T) {
|
||||
digests := [][4]byte{
|
||||
{0, 0, 0, 0},
|
||||
{1, 2, 3, 4},
|
||||
{4, 3, 2, 1},
|
||||
}
|
||||
actions := []oncePerDigest{
|
||||
registerGossipOnce,
|
||||
unregisterGossipOnce,
|
||||
registerRpcOnce,
|
||||
unregisterRpcOnce,
|
||||
}
|
||||
testCombos := func(d [][4]byte, a []oncePerDigest) {
|
||||
s := &Service{}
|
||||
for _, digest := range d {
|
||||
for _, action := range a {
|
||||
t.Run(fmt.Sprintf("digest=%#x/action=%d", digest, action), func(t *testing.T) {
|
||||
if s.digestActionDone(digest, action) {
|
||||
t.Fatal("expected first call to return false")
|
||||
}
|
||||
if !s.digestActionDone(digest, action) {
|
||||
t.Fatal("expected second call to return true")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
testCombos(digests, actions)
|
||||
slices.Reverse(digests)
|
||||
slices.Reverse(actions)
|
||||
testCombos(digests, actions)
|
||||
}
|
||||
@@ -7,9 +7,11 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -18,6 +20,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -47,12 +50,16 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
s.pendingAttsLock.RUnlock()
|
||||
|
||||
if len(attestations) > 0 {
|
||||
start := time.Now()
|
||||
s.processAttestations(ctx, attestations)
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
"duration": duration,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
randGen := rand.NewGenerator()
|
||||
// Delete the missing block root key from the pending attestation queue so the node will not request the block again.
|
||||
s.pendingAttsLock.Lock()
|
||||
@@ -72,18 +79,224 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
}
|
||||
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
|
||||
for _, signedAtt := range attestations {
|
||||
// The pending attestations can arrive as both aggregates and attestations,
|
||||
// and each form has to be processed differently.
|
||||
switch t := signedAtt.(type) {
|
||||
if len(attestations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
atts := make([]ethpb.Att, 0, len(attestations))
|
||||
for _, att := range attestations {
|
||||
switch v := att.(type) {
|
||||
case ethpb.Att:
|
||||
s.processAtt(ctx, t)
|
||||
atts = append(atts, v)
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
s.processAggregate(ctx, t)
|
||||
s.processAggregate(ctx, v)
|
||||
default:
|
||||
log.Warnf("Unexpected item of type %T in pending attestation queue. Item will not be processed", t)
|
||||
log.Warnf("Unexpected attestation type %T, skipping", v)
|
||||
}
|
||||
}
|
||||
|
||||
for _, bucket := range bucketAttestationsByData(atts) {
|
||||
s.processAttestationBucket(ctx, bucket)
|
||||
}
|
||||
}
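The reworked loop above separates the two kinds of pending items: aggregates are still handled one at a time, while single attestations are collected and grouped by AttestationData so the expensive shared checks (forkchoice, FFG consistency, target state) run once per distinct data. A schematic sketch of that split-and-bucket flow, with placeholder types standing in for the real attestation kinds:

```go
package main

import "fmt"

// item stands in for a pending-queue entry: either a single vote or an aggregate.
type item struct {
	kind string // "single" or "aggregate"
	key  string // stand-in for the AttestationData root
}

func main() {
	pending := []item{
		{kind: "aggregate", key: "a"},
		{kind: "single", key: "x"},
		{kind: "single", key: "x"},
		{kind: "single", key: "y"},
	}

	// Aggregates are handled one by one; singles are bucketed by key so the
	// expensive shared checks run once per distinct AttestationData.
	buckets := make(map[string][]item)
	for _, it := range pending {
		switch it.kind {
		case "aggregate":
			fmt.Println("process aggregate immediately")
		case "single":
			buckets[it.key] = append(buckets[it.key], it)
		}
	}
	for key, group := range buckets {
		fmt.Printf("shared checks once for %q, then verify %d attestation(s)\n", key, len(group))
	}
}
```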
|
||||
|
||||
// attestationBucket groups attestations with the same AttestationData for batch processing.
|
||||
type attestationBucket struct {
|
||||
dataHash [32]byte
|
||||
data *ethpb.AttestationData
|
||||
attestations []ethpb.Att
|
||||
}
|
||||
|
||||
// processAttestationBucket processes a bucket of attestations with shared AttestationData.
|
||||
func (s *Service) processAttestationBucket(ctx context.Context, bucket *attestationBucket) {
|
||||
if bucket == nil || len(bucket.attestations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
data := bucket.data
|
||||
|
||||
// Shared validations for the entire bucket.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
log.WithError(blockchain.ErrNotDescendantOfFinalized).WithField("root", fmt.Sprintf("%#x", data.BeaconBlockRoot)).Debug("Failed forkchoice check for bucket")
|
||||
return
|
||||
}
|
||||
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to get attestation prestate for bucket")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, bucket.attestations[0]); err != nil {
|
||||
log.WithError(err).Debug("Failed FFG consistency check for bucket")
|
||||
return
|
||||
}
|
||||
|
||||
// Collect valid attestations for both single and electra formats.
|
||||
// Broadcasting uses the single-attestation format, while the attestation pool and batch signature verification use the Electra format.
|
||||
forBroadcast := make([]ethpb.Att, 0, len(bucket.attestations))
|
||||
forPool := make([]ethpb.Att, 0, len(bucket.attestations))
|
||||
|
||||
for _, att := range bucket.attestations {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, att.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to get committee from state")
|
||||
continue
|
||||
}
|
||||
|
||||
valid, err := validateAttesterData(ctx, att, committee)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed attester data validation")
|
||||
continue
|
||||
}
|
||||
if valid != pubsub.ValidationAccept {
|
||||
log.Debug("Pending attestation rejected due to invalid data")
|
||||
continue
|
||||
}
|
||||
|
||||
var conv ethpb.Att
|
||||
if att.Version() >= version.Electra {
|
||||
single, ok := att.(*ethpb.SingleAttestation)
|
||||
if !ok {
|
||||
log.Debugf("Wrong type: expected %T, got %T", ðpb.SingleAttestation{}, att)
|
||||
continue
|
||||
}
|
||||
conv = single.ToAttestationElectra(committee)
|
||||
} else {
|
||||
conv = att
|
||||
}
|
||||
|
||||
forBroadcast = append(forBroadcast, att)
|
||||
forPool = append(forPool, conv)
|
||||
}
|
||||
|
||||
if len(forPool) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
verified := s.batchVerifyAttestationSignatures(ctx, forPool, preState)
|
||||
verifiedSet := make(map[ethpb.Att]struct{}, len(verified))
|
||||
for _, att := range verified {
|
||||
verifiedSet[att] = struct{}{}
|
||||
}
|
||||
|
||||
for i, poolAtt := range forPool {
|
||||
if _, ok := verifiedSet[poolAtt]; ok {
|
||||
s.processVerifiedAttestation(ctx, forBroadcast[i], poolAtt, preState)
|
||||
}
|
||||
}
|
||||
}
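One subtlety in the verified-set lookup above: the map is keyed on the attestation interface values, and since those are pointers in practice, membership is identity-based rather than value-based; that is why the exact objects appended to forPool are the ones looked up afterwards. A tiny illustration of that identity semantics with a placeholder struct:

```go
package main

import "fmt"

type att struct{ slot int }

func main() {
	// Keys here are pointers, so membership is identity-based: only the same
	// object that went into the set is found again later.
	a := &att{slot: 1}
	b := &att{slot: 1} // equal contents, different object

	verified := map[*att]struct{}{a: {}}
	_, okA := verified[a]
	_, okB := verified[b]
	fmt.Println(okA, okB) // true false
}
```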
|
||||
|
||||
// batchVerifyAttestationSignatures attempts batch verification, with individual fallback on failure.
|
||||
func (s *Service) batchVerifyAttestationSignatures(
|
||||
ctx context.Context,
|
||||
attestations []ethpb.Att,
|
||||
preState state.ReadOnlyBeaconState,
|
||||
) []ethpb.Att {
|
||||
const fallbackMsg = "batch verification failed, using individual checks"
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, preState, attestations)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug(fallbackMsg)
|
||||
return s.fallbackToIndividualVerification(ctx, attestations, preState)
|
||||
}
|
||||
|
||||
ok, err := set.Verify()
|
||||
if err != nil || !ok {
|
||||
if err != nil {
|
||||
log.WithError(err).Debug(fallbackMsg)
|
||||
} else {
|
||||
log.Debug(fallbackMsg)
|
||||
}
|
||||
return s.fallbackToIndividualVerification(ctx, attestations, preState)
|
||||
}
|
||||
|
||||
return attestations
|
||||
}
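Attempting one aggregate signature check and degrading to per-item verification on failure is a common batching pattern: the happy path is cheap, and a single bad signature cannot discard the whole batch. A schematic sketch of the control flow; verifyBatch and verifyOne are placeholders, not real BLS calls:

```go
package main

import (
	"errors"
	"fmt"
)

// verifyBatch and verifyOne stand in for real batch/single signature verification.
func verifyBatch(sigs []string) (bool, error) {
	if len(sigs) > 2 {
		return false, errors.New("batch too large for this toy verifier")
	}
	return true, nil
}

func verifyOne(sig string) bool { return sig != "bad" }

// verifyAll returns the items that pass: the whole batch when batch verification
// succeeds, otherwise only the individually valid ones.
func verifyAll(sigs []string) []string {
	ok, err := verifyBatch(sigs)
	if err == nil && ok {
		return sigs
	}
	// Fall back to individual checks so one bad signature can't discard the rest.
	valid := make([]string, 0, len(sigs))
	for _, s := range sigs {
		if verifyOne(s) {
			valid = append(valid, s)
		}
	}
	return valid
}

func main() {
	fmt.Println(verifyAll([]string{"a", "b"}))        // batch path: [a b]
	fmt.Println(verifyAll([]string{"a", "bad", "c"})) // fallback path: [a c]
}
```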
|
||||
|
||||
// fallbackToIndividualVerification verifies each attestation individually if batch verification fails.
|
||||
func (s *Service) fallbackToIndividualVerification(
|
||||
ctx context.Context,
|
||||
attestations []ethpb.Att,
|
||||
preState state.ReadOnlyBeaconState,
|
||||
) []ethpb.Att {
|
||||
verified := make([]ethpb.Att, 0, len(attestations))
|
||||
|
||||
for _, att := range attestations {
|
||||
res, err := s.validateUnaggregatedAttWithState(ctx, att, preState)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Individual signature verification error")
|
||||
continue
|
||||
}
|
||||
if res == pubsub.ValidationAccept {
|
||||
verified = append(verified, att)
|
||||
}
|
||||
}
|
||||
|
||||
return verified
|
||||
}
|
||||
|
||||
// saveAttestation saves an attestation to the appropriate pool.
|
||||
func (s *Service) saveAttestation(att ethpb.Att) error {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
return s.cfg.attestationCache.Add(att)
|
||||
}
|
||||
return s.cfg.attPool.SaveUnaggregatedAttestation(att)
|
||||
}
|
||||
|
||||
// processVerifiedAttestation handles a signature-verified attestation.
|
||||
func (s *Service) processVerifiedAttestation(
|
||||
ctx context.Context,
|
||||
broadcastAtt ethpb.Att,
|
||||
poolAtt ethpb.Att,
|
||||
preState state.ReadOnlyBeaconState,
|
||||
) {
|
||||
data := broadcastAtt.GetData()
|
||||
|
||||
if err := s.saveAttestation(poolAtt); err != nil {
|
||||
log.WithError(err).Debug("Failed to save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
|
||||
if key, err := generateUnaggregatedAttCacheKey(broadcastAtt); err != nil {
|
||||
log.WithError(err).Error("Failed to generate cache key for attestation tracking")
|
||||
} else {
|
||||
s.setSeenUnaggregatedAtt(key)
|
||||
}
|
||||
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to retrieve active validator count")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, broadcastAtt), broadcastAtt); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast attestation")
|
||||
}
|
||||
|
||||
var (
|
||||
eventType feed.EventType
|
||||
eventData any
|
||||
)
|
||||
|
||||
switch {
|
||||
case broadcastAtt.Version() >= version.Electra:
|
||||
if sa, ok := broadcastAtt.(*ethpb.SingleAttestation); ok {
|
||||
eventType = operation.SingleAttReceived
|
||||
eventData = &operation.SingleAttReceivedData{Attestation: sa}
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
eventType = operation.UnaggregatedAttReceived
|
||||
eventData = &operation.UnAggregatedAttReceivedData{Attestation: broadcastAtt}
|
||||
}
|
||||
|
||||
// Send event notification
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: eventType,
|
||||
Data: eventData,
|
||||
})
|
||||
}
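The event dispatch above relies on a switch with fallthrough: an Electra-era attestation that is not actually a *ethpb.SingleAttestation still emits the generic unaggregated event. A stripped-down sketch of that control flow with placeholder types and event names:

```go
package main

import "fmt"

type att interface{ version() int }

// singleAtt stands in for an Electra SingleAttestation.
type singleAtt struct{}

func (singleAtt) version() int { return 2 }

// phase0Att stands in for a pre-Electra attestation.
type phase0Att struct{}

func (phase0Att) version() int { return 0 }

const electra = 2

// eventFor mirrors the switch above: Electra single attestations get their own
// event, and everything else falls through to the generic unaggregated event.
func eventFor(a att) string {
	switch {
	case a.version() >= electra:
		if _, ok := a.(singleAtt); ok {
			return "SingleAttReceived"
		}
		fallthrough
	default:
		return "UnaggregatedAttReceived"
	}
}

func main() {
	fmt.Println(eventFor(singleAtt{})) // SingleAttReceived
	fmt.Println(eventFor(phase0Att{})) // UnaggregatedAttReceived
}
```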
|
||||
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
|
||||
@@ -94,9 +307,10 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
|
||||
valRes, err := s.validateAggregatedAtt(ctx, aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
}
|
||||
aggValid := pubsub.ValidationAccept == valRes
|
||||
if s.validateBlockInAttestation(ctx, aggregate) && aggValid {
|
||||
if aggValid && s.validateBlockInAttestation(ctx, aggregate) {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.cfg.attestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Debug("Could not save aggregated attestation")
|
||||
@@ -123,114 +337,6 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) processAtt(ctx context.Context, att ethpb.Att) {
|
||||
data := att.GetData()
|
||||
|
||||
// This is an important validation before retrieving attestation pre state to defend against
|
||||
// an attestation's target intentionally referencing a checkpoint from long ago.
|
||||
if !s.cfg.chain.InForkchoice(bytesutil.ToBytes32(data.BeaconBlockRoot)) {
|
||||
log.WithError(blockchain.ErrNotDescendantOfFinalized).Debug("Could not verify finalized consistency")
|
||||
return
|
||||
}
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att); err != nil {
|
||||
log.WithError(err).Debug("Could not verify FFG consistency")
|
||||
return
|
||||
}
|
||||
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve attestation prestate")
|
||||
return
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, data.Slot, att.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve committee from state")
|
||||
return
|
||||
}
|
||||
valid, err := validateAttesterData(ctx, att, committee)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not validate attester data")
|
||||
return
|
||||
} else if valid != pubsub.ValidationAccept {
|
||||
log.Debug("Attestation failed attester data validation")
|
||||
return
|
||||
}
|
||||
|
||||
// Decide if the attestation is an Electra SingleAttestation or a Phase0 unaggregated attestation
|
||||
var (
|
||||
attForValidation ethpb.Att
|
||||
broadcastAtt ethpb.Att
|
||||
eventType feed.EventType
|
||||
eventData interface{}
|
||||
)
|
||||
|
||||
if att.Version() >= version.Electra {
|
||||
singleAtt, ok := att.(*ethpb.SingleAttestation)
|
||||
if !ok {
|
||||
log.Debugf("Attestation has wrong type (expected %T, got %T)", ðpb.SingleAttestation{}, att)
|
||||
return
|
||||
}
|
||||
// Convert Electra SingleAttestation to unaggregated ElectraAttestation. This is needed because many parts of the codebase assume that attestations have a certain structure and SingleAttestation violates these assumptions.
|
||||
attForValidation = singleAtt.ToAttestationElectra(committee)
|
||||
broadcastAtt = singleAtt
|
||||
eventType = operation.SingleAttReceived
|
||||
eventData = &operation.SingleAttReceivedData{
|
||||
Attestation: singleAtt,
|
||||
}
|
||||
} else {
|
||||
// Phase0 attestation
|
||||
attForValidation = att
|
||||
broadcastAtt = att
|
||||
eventType = operation.UnaggregatedAttReceived
|
||||
eventData = &operation.UnAggregatedAttReceivedData{
|
||||
Attestation: att,
|
||||
}
|
||||
}
|
||||
|
||||
valid, err = s.validateUnaggregatedAttWithState(ctx, attForValidation, preState)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending unaggregated attestation failed validation")
|
||||
return
|
||||
}
|
||||
|
||||
if valid == pubsub.ValidationAccept {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.cfg.attestationCache.Add(attForValidation); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if err := s.cfg.attPool.SaveUnaggregatedAttestation(attForValidation); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
attKey, err := generateUnaggregatedAttCacheKey(att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not generate cache key for attestation tracking")
|
||||
} else {
|
||||
s.setSeenUnaggregatedAtt(attKey)
|
||||
}
|
||||
|
||||
valCount, err := helpers.ActiveValidatorCount(ctx, preState, slots.ToEpoch(data.Slot))
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve active validator count")
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast the final 'broadcastAtt' object
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, broadcastAtt), broadcastAtt); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
|
||||
// Feed event notification for other services
|
||||
s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: eventType,
|
||||
Data: eventData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// This defines how pending aggregates are saved in the map. The key is the
|
||||
// root of the missing block. The value is the list of pending attestations/aggregates
|
||||
// that voted for that block root. The caller of this function is responsible
|
||||
@@ -372,3 +478,29 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot primitives.Slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// bucketAttestationsByData groups attestations by their AttestationData hash.
|
||||
func bucketAttestationsByData(attestations []ethpb.Att) map[[32]byte]*attestationBucket {
|
||||
bucketMap := make(map[[32]byte]*attestationBucket)
|
||||
|
||||
for _, att := range attestations {
|
||||
data := att.GetData()
|
||||
dataHash, err := data.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to hash attestation data, skipping attestation")
|
||||
continue
|
||||
}
|
||||
|
||||
if bucket, ok := bucketMap[dataHash]; ok {
|
||||
bucket.attestations = append(bucket.attestations, att)
|
||||
} else {
|
||||
bucketMap[dataHash] = &attestationBucket{
|
||||
dataHash: dataHash,
|
||||
data: data,
|
||||
attestations: []ethpb.Att{att},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return bucketMap
|
||||
}
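The grouping can be exercised in isolation with the same test helpers that appear elsewhere in this diff. A minimal sketch, assuming util.NewAttestation and the ethpb.Att interface behave as they do in those tests:

```go
package main

import (
	"fmt"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)

func main() {
	a1 := util.NewAttestation()
	a1.Data.Slot = 1
	a2 := util.NewAttestation()
	a2.Data = a1.Data // same data -> same bucket
	a3 := util.NewAttestation()
	a3.Data.Slot = 2 // different data -> separate bucket

	// Group by the hash tree root of the attestation data, as the handler does.
	byRoot := map[[32]byte][]ethpb.Att{}
	for _, att := range []ethpb.Att{a1, a2, a3} {
		root, err := att.GetData().HashTreeRoot()
		if err != nil {
			continue // skip anything we cannot hash, mirroring the handler above
		}
		byRoot[root] = append(byRoot[root], att)
	}
	fmt.Println(len(byRoot)) // 2 buckets
}
```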
|
||||
|
||||
417
beacon-chain/sync/pending_attestations_queue_bucket_test.go
Normal file
@@ -0,0 +1,417 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessAttestationBucket(t *testing.T) {
|
||||
t.Run("EmptyBucket", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := &Service{}
|
||||
|
||||
s.processAttestationBucket(context.Background(), nil)
|
||||
|
||||
emptyBucket := &attestationBucket{
|
||||
attestations: []ethpb.Att{},
|
||||
}
|
||||
s.processAttestationBucket(context.Background(), emptyBucket)
|
||||
|
||||
require.Equal(t, 0, len(hook.Entries), "Should not log any messages for empty buckets")
|
||||
})
|
||||
|
||||
t.Run("ForkchoiceFailure", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
chainService := &mockChain.ChainService{
|
||||
NotFinalized: true, // This makes InForkchoice return false
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
attData := ðpb.AttestationData{
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
}
|
||||
|
||||
bucket := &attestationBucket{
|
||||
data: attData,
|
||||
attestations: []ethpb.Att{util.NewAttestation()},
|
||||
}
|
||||
|
||||
s.processAttestationBucket(context.Background(), bucket)
|
||||
|
||||
require.Equal(t, 1, len(hook.Entries))
|
||||
assert.StringContains(t, "Failed forkchoice check for bucket", hook.LastEntry().Message)
|
||||
require.NotNil(t, hook.LastEntry().Data["error"])
|
||||
})
|
||||
|
||||
t.Run("CommitteeFailure", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
|
||||
chainService := &mockChain.ChainService{
|
||||
State: beaconState,
|
||||
ValidAttestation: true,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
attData := ðpb.AttestationData{
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
},
|
||||
CommitteeIndex: 999999,
|
||||
}
|
||||
|
||||
att := util.NewAttestation()
|
||||
att.Data = attData
|
||||
|
||||
bucket := &attestationBucket{
|
||||
data: attData,
|
||||
attestations: []ethpb.Att{att},
|
||||
}
|
||||
|
||||
s.processAttestationBucket(context.Background(), bucket)
|
||||
|
||||
require.Equal(t, 1, len(hook.Entries))
|
||||
assert.StringContains(t, "Failed to get committee from state", hook.LastEntry().Message)
|
||||
})
|
||||
|
||||
t.Run("FFGConsistencyFailure", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
validators := make([]*ethpb.Validator, 64)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: 1000000,
|
||||
EffectiveBalance: 32000000000,
|
||||
}
|
||||
}
|
||||
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
|
||||
chainService := &mockChain.ChainService{
|
||||
State: beaconState,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
attData := ðpb.AttestationData{
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("different_target"), 32), // Different from BeaconBlockRoot to trigger FFG failure
|
||||
},
|
||||
}
|
||||
|
||||
att := util.NewAttestation()
|
||||
att.Data = attData
|
||||
|
||||
bucket := &attestationBucket{
|
||||
data: attData,
|
||||
attestations: []ethpb.Att{att},
|
||||
}
|
||||
|
||||
s.processAttestationBucket(context.Background(), bucket)
|
||||
|
||||
require.Equal(t, 1, len(hook.Entries))
|
||||
assert.StringContains(t, "Failed FFG consistency check for bucket", hook.LastEntry().Message)
|
||||
})
|
||||
|
||||
t.Run("ProcessingSuccess", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validators := make([]*ethpb.Validator, 64)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: 1000000,
|
||||
EffectiveBalance: 32000000000,
|
||||
}
|
||||
}
|
||||
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
|
||||
chainService := &mockChain.ChainService{
|
||||
State: beaconState,
|
||||
ValidAttestation: true,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
// Test with Phase0 attestation
|
||||
t.Run("Phase0_NoError", func(t *testing.T) {
|
||||
hook.Reset() // Reset logs before test
|
||||
phase0Att := util.NewAttestation()
|
||||
phase0Att.Data.Slot = 1
|
||||
phase0Att.Data.CommitteeIndex = 0
|
||||
|
||||
bucket := &attestationBucket{
|
||||
data: phase0Att.GetData(),
|
||||
attestations: []ethpb.Att{phase0Att},
|
||||
}
|
||||
|
||||
s.processAttestationBucket(context.Background(), bucket)
|
||||
})
|
||||
|
||||
// Test with SingleAttestation
|
||||
t.Run("Electra_NoError", func(t *testing.T) {
|
||||
hook.Reset() // Reset logs before test
|
||||
attData := ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
Source: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: bytesutil.PadTo([]byte("source"), 32),
|
||||
},
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte("blockroot"), 32), // Same as BeaconBlockRoot for LMD/FFG consistency
|
||||
},
|
||||
}
|
||||
|
||||
singleAtt := ðpb.SingleAttestation{
|
||||
CommitteeId: 0,
|
||||
AttesterIndex: 0,
|
||||
Data: attData,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
bucket := &attestationBucket{
|
||||
data: singleAtt.GetData(),
|
||||
attestations: []ethpb.Att{singleAtt},
|
||||
}
|
||||
|
||||
s.processAttestationBucket(context.Background(), bucket)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBucketAttestationsByData(t *testing.T) {
|
||||
t.Run("EmptyInput", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
buckets := bucketAttestationsByData(nil)
|
||||
require.Equal(t, 0, len(buckets))
|
||||
require.Equal(t, 0, len(hook.Entries))
|
||||
|
||||
buckets = bucketAttestationsByData([]ethpb.Att{})
|
||||
require.Equal(t, 0, len(buckets))
|
||||
require.Equal(t, 0, len(hook.Entries))
|
||||
})
|
||||
|
||||
t.Run("SingleAttestation", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
att := util.NewAttestation()
|
||||
att.Data.Slot = 1
|
||||
att.Data.CommitteeIndex = 0
|
||||
|
||||
buckets := bucketAttestationsByData([]ethpb.Att{att})
|
||||
|
||||
require.Equal(t, 1, len(buckets))
|
||||
var bucket *attestationBucket
|
||||
for _, b := range buckets {
|
||||
bucket = b
|
||||
break
|
||||
}
|
||||
require.NotNil(t, bucket)
|
||||
require.Equal(t, 1, len(bucket.attestations))
|
||||
require.Equal(t, att, bucket.attestations[0])
|
||||
require.Equal(t, att.GetData(), bucket.data)
|
||||
require.Equal(t, 0, len(hook.Entries))
|
||||
})
|
||||
|
||||
t.Run("MultipleAttestationsSameData", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
att1 := util.NewAttestation()
|
||||
att1.Data.Slot = 1
|
||||
att1.Data.CommitteeIndex = 0
|
||||
|
||||
att2 := util.NewAttestation()
|
||||
att2.Data = att1.Data // Same data
|
||||
att2.Signature = make([]byte, 96) // Different signature
|
||||
|
||||
buckets := bucketAttestationsByData([]ethpb.Att{att1, att2})
|
||||
|
||||
require.Equal(t, 1, len(buckets), "Should have one bucket for same data")
|
||||
var bucket *attestationBucket
|
||||
for _, b := range buckets {
|
||||
bucket = b
|
||||
break
|
||||
}
|
||||
require.NotNil(t, bucket)
|
||||
require.Equal(t, 2, len(bucket.attestations), "Should have both attestations in one bucket")
|
||||
require.Equal(t, att1.GetData(), bucket.data)
|
||||
require.Equal(t, 0, len(hook.Entries))
|
||||
})
|
||||
|
||||
t.Run("MultipleAttestationsDifferentData", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
att1 := util.NewAttestation()
|
||||
att1.Data.Slot = 1
|
||||
att1.Data.CommitteeIndex = 0
|
||||
|
||||
att2 := util.NewAttestation()
|
||||
att2.Data.Slot = 2 // Different slot
|
||||
att2.Data.CommitteeIndex = 1
|
||||
|
||||
buckets := bucketAttestationsByData([]ethpb.Att{att1, att2})
|
||||
|
||||
require.Equal(t, 2, len(buckets), "Should have two buckets for different data")
|
||||
bucketCount := 0
|
||||
for _, bucket := range buckets {
|
||||
require.Equal(t, 1, len(bucket.attestations), "Each bucket should have one attestation")
|
||||
bucketCount++
|
||||
}
|
||||
require.Equal(t, 2, bucketCount, "Should have exactly two buckets")
|
||||
require.Equal(t, 0, len(hook.Entries))
|
||||
})
|
||||
|
||||
t.Run("MixedAttestationTypes", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
// Create Phase0 attestation
|
||||
phase0Att := util.NewAttestation()
|
||||
phase0Att.Data.Slot = 1
|
||||
phase0Att.Data.CommitteeIndex = 0
|
||||
|
||||
electraAtt := ðpb.SingleAttestation{
|
||||
CommitteeId: 0,
|
||||
AttesterIndex: 1,
|
||||
Data: phase0Att.Data, // Same data
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
buckets := bucketAttestationsByData([]ethpb.Att{phase0Att, electraAtt})
|
||||
|
||||
require.Equal(t, 1, len(buckets), "Should have one bucket for same data")
|
||||
var bucket *attestationBucket
|
||||
for _, b := range buckets {
|
||||
bucket = b
|
||||
break
|
||||
}
|
||||
require.NotNil(t, bucket)
|
||||
require.Equal(t, 2, len(bucket.attestations), "Should have both attestations in one bucket")
|
||||
require.Equal(t, phase0Att.GetData(), bucket.data)
|
||||
require.Equal(t, 0, len(hook.Entries))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBatchVerifyAttestationSignatures(t *testing.T) {
|
||||
t.Run("EmptyInput", func(t *testing.T) {
|
||||
s := &Service{}
|
||||
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
result := s.batchVerifyAttestationSignatures(context.Background(), []ethpb.Att{}, beaconState)
|
||||
|
||||
// Empty input should return empty output
|
||||
require.Equal(t, 0, len(result))
|
||||
})
|
||||
|
||||
t.Run("BatchVerificationWithState", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validators := make([]*ethpb.Validator, 64)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: 1000000,
|
||||
EffectiveBalance: 32000000000,
|
||||
}
|
||||
}
|
||||
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
|
||||
s := &Service{}
|
||||
|
||||
att := util.NewAttestation()
|
||||
att.Data.Slot = 1
|
||||
attestations := []ethpb.Att{att}
|
||||
|
||||
result := s.batchVerifyAttestationSignatures(context.Background(), attestations, beaconState)
|
||||
require.NotNil(t, result)
|
||||
|
||||
if len(result) == 0 && len(hook.Entries) > 0 {
// Check whether the fallback message was logged. Either outcome is acceptable:
// seeing it just means the function correctly fell back to individual checks.
foundFallback := false
for _, entry := range hook.Entries {
if entry.Message == "batch verification failed, using individual checks" {
foundFallback = true
break
}
}
_ = foundFallback
}
|
||||
})
|
||||
|
||||
t.Run("BatchVerificationFailureFallbackToIndividual", func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
|
||||
chainService := &mockChain.ChainService{
|
||||
State: beaconState,
|
||||
ValidAttestation: false, // This will cause verification to fail
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
att := util.NewAttestation()
|
||||
att.Data.Slot = 1
|
||||
attestations := []ethpb.Att{att}
|
||||
|
||||
result := s.batchVerifyAttestationSignatures(context.Background(), attestations, beaconState)
|
||||
|
||||
require.Equal(t, 0, len(result))
|
||||
|
||||
require.NotEqual(t, 0, len(hook.Entries), "Should have log entries")
|
||||
found := false
|
||||
for _, entry := range hook.Entries {
|
||||
if entry.Message == "batch verification failed, using individual checks" {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should log fallback message")
|
||||
})
|
||||
}
|
||||
@@ -11,11 +11,9 @@ import (
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/pkg/errors"
|
||||
@@ -122,41 +120,9 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
|
||||
return nil, errors.Errorf("RPC handler not found for fork index %d", forkIndex)
|
||||
}
|
||||
|
||||
// rpcHandlerByTopicFromEpoch returns the RPC handlers for a given epoch.
|
||||
func (s *Service) rpcHandlerByTopicFromEpoch(epoch primitives.Epoch) (map[string]rpcHandler, error) {
|
||||
// Get the beacon config.
|
||||
beaconConfig := params.BeaconConfig()
|
||||
|
||||
if epoch >= beaconConfig.FuluForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Fulu)
|
||||
}
|
||||
|
||||
if epoch >= beaconConfig.ElectraForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Electra)
|
||||
}
|
||||
|
||||
if epoch >= beaconConfig.DenebForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Deneb)
|
||||
}
|
||||
|
||||
if epoch >= beaconConfig.CapellaForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Capella)
|
||||
}
|
||||
|
||||
if epoch >= beaconConfig.BellatrixForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Bellatrix)
|
||||
}
|
||||
|
||||
if epoch >= beaconConfig.AltairForkEpoch {
|
||||
return s.rpcHandlerByTopicFromFork(version.Altair)
|
||||
}
|
||||
|
||||
return s.rpcHandlerByTopicFromFork(version.Phase0)
|
||||
}
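The removed helper mapped an epoch to handlers by checking fork epochs from newest to oldest, so an epoch resolves to the most recent fork activated at or before it; the replacement skips that lookup by taking the fork's version enum straight from the network schedule entry. A cut-down illustration of why the descending order matters (fork names and epochs are made up, not mainnet values):

```go
package main

import "fmt"

type fork struct {
	name  string
	epoch uint64
}

// forkAt returns the newest fork activated at or before the given epoch.
// The schedule is ordered newest first, mirroring the chain of >= checks above.
func forkAt(epoch uint64, schedule []fork) string {
	for _, f := range schedule {
		if epoch >= f.epoch {
			return f.name
		}
	}
	return "phase0"
}

func main() {
	schedule := []fork{
		{"fulu", 500}, {"electra", 400}, {"deneb", 300},
		{"capella", 200}, {"bellatrix", 100}, {"altair", 50},
	}
	fmt.Println(forkAt(120, schedule)) // bellatrix
	fmt.Println(forkAt(10, schedule))  // phase0
}
```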
|
||||
|
||||
// addedRPCHandlerByTopic returns the RPC handlers that are added in the new map that are not present in the old map.
|
||||
func addedRPCHandlerByTopic(previous, next map[string]rpcHandler) map[string]rpcHandler {
|
||||
added := make(map[string]rpcHandler)
|
||||
added := make(map[string]rpcHandler, len(next))
|
||||
|
||||
for topic, handler := range next {
|
||||
if _, ok := previous[topic]; !ok {
|
||||
@@ -181,13 +147,12 @@ func removedRPCTopics(previous, next map[string]rpcHandler) map[string]bool {
|
||||
}
|
||||
|
||||
// registerRPCHandlers for p2p RPC.
|
||||
func (s *Service) registerRPCHandlers() error {
|
||||
// Get the current epoch.
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
func (s *Service) registerRPCHandlers(nse params.NetworkScheduleEntry) error {
|
||||
if s.digestActionDone(nse.ForkDigest, registerRpcOnce) {
|
||||
return nil
|
||||
}
|
||||
// Get the RPC handlers for the current epoch.
|
||||
handlerByTopic, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
|
||||
handlerByTopic, err := s.rpcHandlerByTopicFromFork(nse.VersionEnum)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rpc handler by topic from fork")
|
||||
}
|
||||
|
||||
@@ -854,7 +854,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
|
||||
blocks := make([]*ethpb.SignedBeaconBlock, 0, req.Count)
|
||||
for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += primitives.Slot(req.Step) {
|
||||
code, _, err := ReadStatusCode(stream, &encoder.SszNetworkEncoder{})
|
||||
if err != nil && err != io.EOF {
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if code != 0 || errors.Is(err, io.EOF) {
|
||||
|
||||
@@ -93,9 +93,13 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
|
||||
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
|
||||
// If so, requests them and saves them to the storage.
|
||||
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
|
||||
if len(blks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
@@ -415,6 +415,7 @@ func TestRequestPendingBlobs(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestConstructPendingBlobsRequest(t *testing.T) {
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
d := db.SetupDB(t)
|
||||
bs := filesystem.NewEphemeralBlobStorage(t)
|
||||
s := &Service{cfg: &config{beaconDB: d, blobStorage: bs}}
|
||||
@@ -436,6 +437,7 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
|
||||
ParentRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
StateRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte{}, 32),
|
||||
Slot: ds,
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte{}, 96),
|
||||
}
|
||||
|
||||
@@ -57,6 +57,8 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
|
||||
return wQuota, nil
|
||||
}
|
||||
|
||||
var blobRpcThrottleInterval = time.Second
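Hoisting the one-second ticker interval into a package-level variable presumably exists so tests can shrink it rather than wait out real seconds between batches. A hedged sketch of how a test in the same package might override it; this helper is an assumption, not code from the diff:

```go
package sync

import (
	"testing"
	"time"
)

// useFastBlobThrottle shortens the package-level throttle interval for one test
// and restores it afterwards. Illustrative only; not part of this change.
func useFastBlobThrottle(t *testing.T) {
	t.Helper()
	old := blobRpcThrottleInterval
	blobRpcThrottleInterval = time.Millisecond
	t.Cleanup(func() { blobRpcThrottleInterval = old })
}
```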
|
||||
|
||||
// blobsSidecarsByRangeRPCHandler looks up the request blobs from the database from a given start slot index
|
||||
func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
var err error
|
||||
@@ -86,7 +88,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
|
||||
// Ticker to stagger out large requests.
|
||||
ticker := time.NewTicker(time.Second)
|
||||
ticker := time.NewTicker(blobRpcThrottleInterval)
|
||||
defer ticker.Stop()
|
||||
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
|
||||
if err != nil {
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
|
||||
@@ -18,8 +20,7 @@ func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
|
||||
if oldestEpoch < params.BeaconConfig().DenebForkEpoch {
|
||||
oldestEpoch = params.BeaconConfig().DenebForkEpoch
|
||||
}
|
||||
oldestSlot, err := slots.EpochStart(oldestEpoch)
|
||||
require.NoError(t, err)
|
||||
oldestSlot := util.SlotAtEpoch(t, oldestEpoch)
|
||||
return oldestSlot
|
||||
}
|
||||
|
||||
@@ -89,16 +90,11 @@ func (c *blobsTestCase) runTestBlobSidecarsByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlobByRangeOK(t *testing.T) {
|
||||
origNC := params.BeaconConfig()
|
||||
// restore network config after test completes
|
||||
defer func() {
|
||||
params.OverrideBeaconConfig(origNC)
|
||||
}()
|
||||
// set MaxRequestBlobSidecars to a low-ish value so the test doesn't timeout.
|
||||
nc := params.BeaconConfig().Copy()
|
||||
nc.MaxRequestBlobSidecars = 100
|
||||
params.OverrideBeaconConfig(nc)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
retainSlots := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
current := ds + retainSlots
|
||||
cases := []*blobsTestCase{
|
||||
{
|
||||
name: "beginning of window + 10",
|
||||
@@ -134,11 +130,11 @@ func TestBlobByRangeOK(t *testing.T) {
|
||||
Count: 20,
|
||||
}
|
||||
},
|
||||
total: func() *int { x := params.BeaconConfig().MaxBlobsPerBlock(0) * 10; return &x }(), // 10 blocks * 4 blobs = 40
|
||||
total: func() *int { x := params.BeaconConfig().MaxBlobsPerBlock(ds) * 10; return &x }(), // 10 blocks * 4 blobs = 40
|
||||
},
|
||||
{
|
||||
name: "when request count > MAX_REQUEST_BLOCKS_DENEB, MAX_REQUEST_BLOBS_SIDECARS sidecars in response",
|
||||
nblocks: int(params.BeaconConfig().MaxRequestBlocksDeneb) + 10,
|
||||
nblocks: int(params.BeaconConfig().MaxRequestBlocksDeneb) + 1,
|
||||
requestFromSidecars: func(scs []blocks.ROBlob) interface{} {
|
||||
return ðpb.BlobSidecarsByRangeRequest{
|
||||
StartSlot: scs[0].Slot(),
|
||||
@@ -148,7 +144,9 @@ func TestBlobByRangeOK(t *testing.T) {
|
||||
total: func() *int { x := int(params.BeaconConfig().MaxRequestBlobSidecars); return &x }(),
|
||||
},
|
||||
}
|
||||
clock := startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(current))
|
||||
for _, c := range cases {
|
||||
c.clock = clock
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
c.runTestBlobSidecarsByRange(t)
|
||||
})
|
||||
@@ -156,19 +154,12 @@ func TestBlobByRangeOK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlobsByRangeValidation(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
repositionFutureEpochs(cfg)
|
||||
undo, err := params.SetActiveWithUndo(cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
repositionFutureEpochs(params.BeaconConfig())
|
||||
denebSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
|
||||
minReqEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minReqSlots, err := slots.EpochStart(minReqEpochs)
|
||||
require.NoError(t, err)
|
||||
minReqSlots := util.SlotAtEpoch(t, minReqEpochs)
|
||||
// spec criteria for min/max bound checking
|
||||
/*
|
||||
Clients MUST keep a record of signed blobs sidecars seen on the epoch range
|
||||
@@ -231,7 +222,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
|
||||
},
|
||||
start: defaultMinStart,
|
||||
end: defaultMinStart + 9,
|
||||
batch: blobBatchLimit(100),
|
||||
batch: blobBatchLimit(defaultCurrent),
|
||||
},
|
||||
{
|
||||
name: "count > MAX_REQUEST_BLOB_SIDECARS",
|
||||
@@ -243,7 +234,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
|
||||
start: defaultMinStart,
|
||||
end: defaultMinStart - 10 + 999,
|
||||
// a large count is ok; we just limit the number of actual responses
|
||||
batch: blobBatchLimit(100),
|
||||
batch: blobBatchLimit(defaultCurrent),
|
||||
},
|
||||
{
|
||||
name: "start + count > current",
|
||||
@@ -265,7 +256,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
|
||||
},
|
||||
start: denebSlot,
|
||||
end: denebSlot + 89,
|
||||
batch: blobBatchLimit(100),
|
||||
batch: blobBatchLimit(defaultCurrent - minReqSlots + 100),
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
@@ -285,8 +276,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlobRPCMinValidSlot(t *testing.T) {
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
denebSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
cases := []struct {
|
||||
name string
|
||||
current func(t *testing.T) types.Slot
|
||||
@@ -296,9 +286,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
|
||||
{
|
||||
name: "before deneb",
|
||||
current: func(t *testing.T) types.Slot {
|
||||
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch - 1)
|
||||
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch-1)
|
||||
// note: we no longer need to deal with deneb fork epoch being far future
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
},
|
||||
expected: denebSlot,
|
||||
@@ -306,9 +295,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
|
||||
{
|
||||
name: "equal to deneb",
|
||||
current: func(t *testing.T) types.Slot {
|
||||
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
// note: we no longer need to deal with deneb fork epoch being far future
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
},
|
||||
expected: denebSlot,
|
||||
@@ -316,9 +304,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
|
||||
{
|
||||
name: "after deneb, before expiry starts",
|
||||
current: func(t *testing.T) types.Slot {
|
||||
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch+params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
// note: we no longer need to deal with deneb fork epoch being far future
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
},
|
||||
expected: denebSlot,
|
||||
@@ -326,9 +313,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
|
||||
{
|
||||
name: "expiry starts one epoch after deneb + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS",
|
||||
current: func(t *testing.T) types.Slot {
|
||||
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1)
|
||||
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch+params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)
|
||||
// note: we no longer need to deal with deneb fork epoch being far future
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
},
|
||||
expected: denebSlot + params.BeaconConfig().SlotsPerEpoch,
|
||||
|
||||
@@ -49,7 +49,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
|
||||
batchSize := flags.Get().BlobBatchLimit
|
||||
var ticker *time.Ticker
|
||||
if len(blobIdents) > batchSize {
|
||||
ticker = time.NewTicker(time.Second)
|
||||
ticker = time.NewTicker(blobRpcThrottleInterval)
|
||||
}
|
||||
|
||||
// Compute the oldest slot we'll allow a peer to request, based on the current slot.
|
||||
@@ -72,17 +72,21 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
|
||||
root, idx := bytesutil.ToBytes32(blobIdents[i].BlockRoot), blobIdents[i].Index
|
||||
sc, err := s.cfg.blobStorage.Get(root, idx)
|
||||
if err != nil {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"index": idx,
|
||||
"peer": remotePeer.String(),
|
||||
})
|
||||
|
||||
if db.IsNotFound(err) {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"index": idx,
|
||||
"peer": remotePeer.String(),
|
||||
}).Debugf("Peer requested blob sidecar by root not found in db")
|
||||
log.Trace("Peer requested blob sidecar by root not found in db")
|
||||
continue
|
||||
}
|
||||
log.WithError(err).Errorf("unexpected db error retrieving BlobSidecar, root=%x, index=%d", root, idx)
|
||||
|
||||
log.Error("Unexpected DB error retrieving blob sidecar from storage")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
return err
|
||||
|
||||
return errors.Wrap(err, "get blob sidecar by root")
|
||||
}
|
||||
|
||||
// If any root in the request content references a block earlier than minimum_request_epoch,
|
||||
|
||||
@@ -7,13 +7,15 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/libp2p/go-libp2p/core/network"
)

@@ -123,6 +125,13 @@ func (c *blobsTestCase) runTestBlobSidecarsByRoot(t *testing.T) {
if c.streamReader == nil {
c.streamReader = defaultExpectedRequirer
}
if c.clock == nil {
de := params.BeaconConfig().DenebForkEpoch
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
cs := util.SlotAtEpoch(t, ce)
c.clock = startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(cs))
}
c.run(t)
}

@@ -181,18 +190,20 @@ func readChunkEncodedBlobsAsStreamReader(t *testing.T, s *Service, expect []*exp
}

func TestBlobsByRootValidation(t *testing.T) {
cfg := params.BeaconConfig()
repositionFutureEpochs(cfg)
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
dmc, clock := defaultMockChain(t)
params.SetupTestConfigCleanup(t)
repositionFutureEpochs(params.BeaconConfig())

de := params.BeaconConfig().DenebForkEpoch
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
cs := util.SlotAtEpoch(t, ce)
clock := startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(cs))

dmc := defaultMockChain(t, ce)
capellaSlot := util.SlotAtEpoch(t, params.BeaconConfig().CapellaForkEpoch)
dmc.Slot = &capellaSlot
dmc.FinalizedCheckPoint = &ethpb.Checkpoint{Epoch: params.BeaconConfig().CapellaForkEpoch}
maxBlobs := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
cases := []*blobsTestCase{
{
name: "block before minimum_request_epoch",
@@ -222,7 +233,7 @@ func TestBlobsByRootValidation(t *testing.T) {
name: "block with all indices missing between 2 full blocks",
nblocks: 3,
missing: map[int]bool{1: true},
total: func(i int) *int { return &i }(2 * int(params.BeaconConfig().MaxBlobsPerBlock(0))),
total: func(i int) *int { return &i }(2 * int(maxBlobs)),
},
{
name: "exceeds req max",
@@ -232,6 +243,7 @@ func TestBlobsByRootValidation(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
c.clock = clock
c.runTestBlobSidecarsByRoot(t)
})
}
@@ -5,6 +5,7 @@ import (
"slices"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -41,21 +42,17 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars
remotePeer := stream.Conn().RemotePeer()

requestedColumns := request.Columns

// Format log fields.
var requestedColumnsLog interface{} = "all"
if uint64(len(requestedColumns)) != beaconConfig.NumberOfColumns {
requestedColumnsLog = requestedColumns
}

log := log.WithFields(logrus.Fields{
"remotePeer": remotePeer,
"requestedColumns": requestedColumnsLog,
"startSlot": request.StartSlot,
"count": request.Count,
"remotePeer": remotePeer,
"startSlot": request.StartSlot,
"count": request.Count,
})

if log.Logger.Level >= logrus.DebugLevel {
slices.Sort(request.Columns)
log = log.WithField("requestedColumns", helpers.PrettySlice(request.Columns))
}

// Validate the request regarding rate limiting.
if err := s.rateLimiter.validateRequest(stream, rateLimitingAmount); err != nil {
return errors.Wrap(err, "rate limiter validate request")
@@ -69,13 +66,13 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
tracing.AnnotateError(span, err)
return errors.Wrap(err, "validate data columns by range")
}

log.Trace("Serving data column sidecars by range")

if rangeParameters == nil {
log.Debug("No data columns by range to serve")
return nil
}

log.Debug("Serving data columns by range request")

// Ticker to stagger out large requests.
ticker := time.NewTicker(time.Second)

@@ -104,13 +101,13 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i

// Once the quota is reached, we're done serving the request.
if maxRequestDataColumnSidecars == 0 {
log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Debug("Reached quota for data column sidecars by range request")
log.WithField("initialQuota", beaconConfig.MaxRequestDataColumnSidecars).Trace("Reached quota for data column sidecars by range request")
break
}
}

if err := batch.error(); err != nil {
log.WithError(err).Debug("Error in DataColumnSidecarsByRange batch")
log.WithError(err).Error("Cannot get next batch of blocks")

// If we hit a rate limit, the error response has already been written, and the stream is already closed.
if !errors.Is(err, p2ptypes.ErrRateLimited) {
@@ -7,6 +7,7 @@ import (
"slices"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -33,7 +34,6 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
defer span.End()

batchSize := flags.Get().DataColumnBatchLimit
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Check if the message type is the one expected.
ref, ok := msg.(types.DataColumnsByRootIdentifiers)
@@ -68,26 +68,13 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
slices.Sort(columns)
}

// Format nice logs.
requestedColumnsByRootLog := make(map[string]interface{})
for root, columns := range requestedColumnsByRoot {
rootStr := fmt.Sprintf("%#x", root)
requestedColumnsByRootLog[rootStr] = "all"
if uint64(len(columns)) != numberOfColumns {
requestedColumnsByRootLog[rootStr] = columns
}
}

// Compute the oldest slot we'll allow a peer to request, based on the current slot.
minReqSlot, err := dataColumnsRPCMinValidSlot(s.cfg.clock.CurrentSlot())
if err != nil {
return errors.Wrapf(err, "data columns RPC min valid slot")
}

log := log.WithFields(logrus.Fields{
"peer": remotePeer,
"columns": requestedColumnsByRootLog,
})
log := log.WithField("peer", remotePeer)

defer closeStream(stream, log)

@@ -96,7 +83,18 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
ticker = time.NewTicker(tickerDelay)
}

log.Debug("Serving data column sidecar by root request")
if log.Logger.Level >= logrus.TraceLevel {
// We optimistically assume the peer requests the same set of columns for all roots,
// pre-sizing the map accordingly.
requestedRootsByColumnSet := make(map[string][]string, 1)
for root, columns := range requestedColumnsByRoot {
slices.Sort(columns)
prettyColumns := helpers.PrettySlice(columns)
requestedRootsByColumnSet[prettyColumns] = append(requestedRootsByColumnSet[prettyColumns], fmt.Sprintf("%#x", root))
}

log.WithField("requested", requestedRootsByColumnSet).Trace("Serving data column sidecars by root")
}

count := 0
for _, ident := range requestedColumnIdents {
@@ -27,7 +27,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/network"
)

@@ -614,18 +613,19 @@ func TestBlobValidatorFromRangeReq(t *testing.T) {
}

func TestSeqBlobValid(t *testing.T) {
one, oneBlobs := generateTestBlockWithSidecars(t, [32]byte{}, 0, 3)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
one, oneBlobs := generateTestBlockWithSidecars(t, [32]byte{}, ds, 3)
r1, err := one.Block.HashTreeRoot()
require.NoError(t, err)
two, twoBlobs := generateTestBlockWithSidecars(t, r1, 1, 3)
two, twoBlobs := generateTestBlockWithSidecars(t, r1, ds+1, 3)
r2, err := two.Block.HashTreeRoot()
require.NoError(t, err)
_, oops := generateTestBlockWithSidecars(t, r2, 0, 4)
_, oops := generateTestBlockWithSidecars(t, r2, ds, 4)
oops[1].SignedBlockHeader.Header.ParentRoot = bytesutil.PadTo([]byte("derp"), 32)
wrongRoot, err := blocks.NewROBlobWithRoot(oops[2].BlobSidecar, bytesutil.ToBytes32([]byte("parentderp")))
require.NoError(t, err)
oob := oops[3]
oob.Index = uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
oob.Index = uint64(params.BeaconConfig().MaxBlobsPerBlock(ds))

cases := []struct {
name string
@@ -704,7 +704,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {

t.Run("single blob - Deneb", func(t *testing.T) {
// Setup genesis such that we are currently in deneb.
s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
s := uint64(util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
require.NoError(t, err)
@@ -713,7 +713,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
// Set current slot to a deneb slot.
slot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch + 1)
slot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch+1)
// Create a simple handler that will return a valid response.
p2.SetStreamHandler(topic, func(stream network.Stream) {
defer func() {
@@ -757,7 +757,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
require.NoError(t, undo())
}()
// Setup genesis such that we are currently in deneb.
s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
s := uint64(util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
require.NoError(t, err)
@@ -766,7 +766,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
// Set current slot to the first slot of the last deneb epoch.
slot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
slot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Create a simple handler that will return a valid response.
p2.SetStreamHandler(topic, func(stream network.Stream) {
defer func() {
@@ -825,7 +825,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
require.NoError(t, undo())
}()

s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().ElectraForkEpoch)) * params.BeaconConfig().SecondsPerSlot
s := uint64(util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)) * params.BeaconConfig().SecondsPerSlot
clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
require.NoError(t, err)
@@ -834,7 +834,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)

slot := slots.UnsafeEpochStart(params.BeaconConfig().ElectraForkEpoch)
slot := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
// Create a simple handler that will return a valid response.
p2.SetStreamHandler(topic, func(stream network.Stream) {
defer func() {
@@ -160,7 +160,10 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, peer peer.ID) error
}

cp := s.cfg.chain.FinalizedCheckpt()
status := s.buildStatusFromEpoch(currentEpoch, forkDigest, cp.Root, cp.Epoch, headRoot)
status, err := s.buildStatusFromEpoch(ctx, currentEpoch, forkDigest, cp.Root, cp.Epoch, headRoot)
if err != nil {
return errors.Wrap(err, "build status from epoch")
}

stream, err := s.cfg.p2p.Send(ctx, status, topic, peer)
if err != nil {
@@ -312,7 +315,7 @@ func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream)
}

cp := s.cfg.chain.FinalizedCheckpt()
status, err := s.buildStatusFromStream(stream, forkDigest, cp.Root, cp.Epoch, headRoot)
status, err := s.buildStatusFromStream(ctx, stream, forkDigest, cp.Root, cp.Epoch, headRoot)
if err != nil {
return errors.Wrap(err, "build status")
}
@@ -329,6 +332,7 @@ func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream)
}

func (s *Service) buildStatusFromStream(
ctx context.Context,
stream libp2pcore.Stream,
forkDigest [4]byte,
finalizedRoot []byte,
@@ -353,8 +357,8 @@ func (s *Service) buildStatusFromStream(
return nil, err
}

if streamVersion == p2p.SchemaVersionV2 {
earliestAvailableSlot, err := s.cfg.p2p.EarliestAvailableSlot()
if params.FuluEnabled() && streamVersion == p2p.SchemaVersionV2 {
earliestAvailableSlot, err := s.cfg.p2p.EarliestAvailableSlot(ctx)
if err != nil {
return nil, errors.Wrap(err, "earliest available slot")
}
@@ -383,24 +387,30 @@ func (s *Service) buildStatusFromStream(
}

func (s *Service) buildStatusFromEpoch(
ctx context.Context,
epoch primitives.Epoch,
forkDigest [4]byte,
finalizedRoot []byte,
FinalizedEpoch primitives.Epoch,
headRoot []byte,
) ssz.Marshaler {
) (ssz.Marshaler, error) {
// Get the stream version from the protocol.
if epoch >= params.BeaconConfig().FuluForkEpoch {
earliestAvailableSlot, err := s.cfg.p2p.EarliestAvailableSlot(ctx)
if err != nil {
return nil, errors.Wrap(err, "earliest available slot")
}

status := &pb.StatusV2{
ForkDigest: forkDigest[:],
FinalizedRoot: finalizedRoot,
FinalizedEpoch: FinalizedEpoch,
HeadRoot: headRoot,
HeadSlot: s.cfg.chain.HeadSlot(),
EarliestAvailableSlot: 0,
EarliestAvailableSlot: earliestAvailableSlot,
}

return status
return status, nil
}

status := &pb.Status{
@@ -411,7 +421,7 @@ func (s *Service) buildStatusFromEpoch(
HeadSlot: s.cfg.chain.HeadSlot(),
}

return status
return status, nil
}

func (s *Service) validateStatusMessage(ctx context.Context, genericMsg interface{}) error {
@@ -40,6 +40,8 @@ import (
)

func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
ctx := t.Context()

p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
@@ -96,9 +98,9 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
assert.NoError(t, stream.Close())
})

stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl)
stream1, err := p1.BHost.NewStream(ctx, p2.BHost.ID(), pcl)
require.NoError(t, err)
assert.NoError(t, r.statusRPCHandler(t.Context(), &ethpb.Status{ForkDigest: bytesutil.PadTo([]byte("f"), 4), HeadRoot: make([]byte, 32), FinalizedRoot: make([]byte, 32)}, stream1))
assert.NoError(t, r.statusRPCHandler(ctx, &ethpb.Status{ForkDigest: bytesutil.PadTo([]byte("f"), 4), HeadRoot: make([]byte, 32), FinalizedRoot: make([]byte, 32)}, stream1))

if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
@@ -111,6 +113,8 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
}

func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
ctx := t.Context()

p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
@@ -153,12 +157,12 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
assert.DeepEqual(t, root[:], out.FinalizedRoot)
})

stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl)
stream1, err := p1.BHost.NewStream(ctx, p2.BHost.ID(), pcl)
require.NoError(t, err)
digest, err := r.currentForkDigest()
require.NoError(t, err)

err = r.statusRPCHandler(t.Context(), &ethpb.Status{ForkDigest: digest[:], FinalizedRoot: params.BeaconConfig().ZeroHash[:]}, stream1)
err = r.statusRPCHandler(ctx, &ethpb.Status{ForkDigest: digest[:], FinalizedRoot: params.BeaconConfig().ZeroHash[:]}, stream1)
assert.NoError(t, err)

if util.WaitTimeout(&wg, 1*time.Second) {
@@ -169,6 +173,8 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
}

func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
ctx := t.Context()

p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
@@ -185,12 +191,12 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
finalized.Block.Slot = blkSlot
finalizedRoot, err := finalized.Block.HashTreeRoot()
require.NoError(t, err)
genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{})
genesisState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{})
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot))
util.SaveBlock(t, t.Context(), db, finalized)
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), finalizedRoot))
util.SaveBlock(t, ctx, db, finalized)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, finalizedRoot))
finalizedCheckpt := &ethpb.Checkpoint{
Epoch: 3,
Root: finalizedRoot[:],
@@ -247,10 +253,10 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected)
}
})
stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl)
stream1, err := p1.BHost.NewStream(ctx, p2.BHost.ID(), pcl)
require.NoError(t, err)

err = r.statusRPCHandler(t.Context(), &ethpb.Status{
err = r.statusRPCHandler(ctx, &ethpb.Status{
ForkDigest: digest[:],
FinalizedRoot: finalizedRoot[:],
FinalizedEpoch: 3,
@@ -434,7 +440,13 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
}

func TestStatusRPCRequest_RequestSent(t *testing.T) {
const (
earliestAvailableSlot = primitives.Slot(50)
custodyGroupCount = uint64(4)
)

beaconConfig := params.BeaconConfig()
ctx := t.Context()

testCases := []struct {
name string
@@ -465,7 +477,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected)
}

err = service.respondWithStatus(context.Background(), stream)
err = service.respondWithStatus(ctx, stream)
require.NoError(t, err)
},
},
@@ -486,14 +498,14 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
HeadRoot: headRoot,
FinalizedEpoch: 5,
FinalizedRoot: finalizedRoot,
EarliestAvailableSlot: 0,
EarliestAvailableSlot: earliestAvailableSlot,
}

if !proto.Equal(out, expected) {
t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected)
}

err = service.respondWithStatus(t.Context(), stream)
err = service.respondWithStatus(ctx, stream)
require.NoError(t, err)
},
},
@@ -510,6 +522,11 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
p1.Connect(p2)

updatedEas, updatedCgc, err := p1.UpdateCustodyInfo(earliestAvailableSlot, custodyGroupCount)
require.NoError(t, err)
require.Equal(t, earliestAvailableSlot, updatedEas)
require.Equal(t, custodyGroupCount, updatedCgc)

// Set up a head state with data we expect.
head := util.NewBeaconBlock()
head.Block.Slot = 111
@@ -521,7 +538,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
finalizedRoot, err := finalized.Block.HashTreeRoot()
require.NoError(t, err)

genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{})
genesisState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{})
require.NoError(t, err)

require.NoError(t, genesisState.SetSlot(111))
@@ -550,7 +567,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
ctx: context.Background(),
ctx: ctx,
rateLimiter: newRateLimiter(p1),
}

@@ -566,7 +583,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
tc.streamHandler(r, stream, genesisState, chain.Root, headRoot[:], finalizedRoot[:])
})

err = r.sendRPCStatusRequest(context.Background(), p2.BHost.ID())
err = r.sendRPCStatusRequest(ctx, p2.BHost.ID())
require.ErrorIs(t, err, p2ptypes.ErrInvalidEpoch)

if util.WaitTimeout(&wg, 1*time.Hour) {
@@ -579,6 +596,8 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
}

func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
ctx := t.Context()

p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
db := testingDB.SetupDB(t)
@@ -593,14 +612,14 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
finalized.Block.Slot = blkSlot
finalizedRoot, err := finalized.Block.HashTreeRoot()
require.NoError(t, err)
genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)})
genesisState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)})
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot))
blk := util.NewBeaconBlock()
blk.Block.Slot = blkSlot
util.SaveBlock(t, t.Context(), db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), finalizedRoot))
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, finalizedRoot))
finalizedCheckpt := &ethpb.Checkpoint{
Epoch: 3,
Root: finalizedRoot[:],
@@ -628,7 +647,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
stateNotifier: chain.StateNotifier(),
},
ctx: t.Context(),
ctx: ctx,
rateLimiter: newRateLimiter(p1),
}
chain2 := &mock.ChainService{
@@ -653,7 +672,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
beaconDB: db,
stateNotifier: chain.StateNotifier(),
},
ctx: t.Context(),
ctx: ctx,
rateLimiter: newRateLimiter(p1),
}

@@ -667,7 +686,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
defer wg.Done()
out := &ethpb.Status{}
assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out))
assert.NoError(t, r2.validateStatusMessage(t.Context(), out))
assert.NoError(t, r2.validateStatusMessage(ctx, out))
})

p1.AddConnectionHandler(r.sendRPCStatusRequest, nil)
@@ -681,9 +700,11 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
}

func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
db, err := kv.NewKVStore(t.Context(), t.TempDir())
ctx := t.Context()

db, err := kv.NewKVStore(ctx, t.TempDir())
require.NoError(t, err)
bState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)})
bState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)})
require.NoError(t, err)

blk := util.NewBeaconBlock()
@@ -693,10 +714,10 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {

wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(t.Context(), wsb))
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genRoot))
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genRoot))
blocksTillHead := makeBlocks(t, 1, 1000, genRoot)
require.NoError(t, db.SaveBlocks(t.Context(), blocksTillHead))
require.NoError(t, db.SaveBlocks(ctx, blocksTillHead))

stateSummaries := make([]*ethpb.StateSummary, len(blocksTillHead))
for i, b := range blocksTillHead {
@@ -707,7 +728,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
Root: bRoot[:],
}
}
require.NoError(t, db.SaveStateSummaries(t.Context(), stateSummaries))
require.NoError(t, db.SaveStateSummaries(ctx, stateSummaries))

rootFetcher := func(slot primitives.Slot) [32]byte {
rt, err := blocksTillHead[slot-1].Block().HashTreeRoot()
@@ -788,7 +809,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
Epoch: tt.remoteFinalizedEpoch,
Root: tt.remoteFinalizedRoot[:],
}
require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), finalizedCheckpt))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpt))

epoch := expectedFinalizedEpoch.Add(2)
totalSec := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch) * params.BeaconConfig().SecondsPerSlot))
@@ -816,7 +837,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
stateNotifier: chain.StateNotifier(),
},
ctx: t.Context(),
ctx: ctx,
rateLimiter: newRateLimiter(p1),
}
chain2 := &mock.ChainService{
@@ -843,7 +864,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
stateNotifier: chain.StateNotifier(),
},

ctx: t.Context(),
ctx: ctx,
rateLimiter: newRateLimiter(p1),
}

@@ -857,7 +878,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
defer wg.Done()
out := &ethpb.Status{}
assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out))
assert.Equal(t, tt.expectError, r2.validateStatusMessage(t.Context(), out) != nil)
assert.Equal(t, tt.expectError, r2.validateStatusMessage(ctx, out) != nil)
})

p1.AddConnectionHandler(r.sendRPCStatusRequest, nil)
@@ -973,6 +994,8 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
}

func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
ctx := t.Context()

// Set up a head state with data we expect.
head := util.NewBeaconBlock()
head.Block.Slot = 111
@@ -983,7 +1006,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
finalized.Block.Slot = blkSlot
finalizedRoot, err := finalized.Block.HashTreeRoot()
require.NoError(t, err)
genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{})
genesisState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{})
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot))
@@ -1008,7 +1031,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
stateNotifier: chain.StateNotifier(),
},
ctx: t.Context(),
ctx: ctx,
}
digest, err := r.currentForkDigest()
require.NoError(t, err)
@@ -1025,6 +1048,8 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
}

func TestShouldResync(t *testing.T) {
ctx := t.Context()

type args struct {
genesis time.Time
syncing bool
@@ -1073,7 +1098,7 @@ func TestShouldResync(t *testing.T) {
},
}
for _, tt := range tests {
headState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{})
headState, err := transition.GenesisBeaconState(ctx, nil, 0, &ethpb.Eth1Data{})
require.NoError(t, err)
require.NoError(t, headState.SetSlot(tt.args.headSlot))
chain := &mock.ChainService{
@@ -1087,7 +1112,7 @@ func TestShouldResync(t *testing.T) {
initialSync: &mockSync.Sync{IsSyncing: tt.args.syncing},
stateNotifier: chain.StateNotifier(),
},
ctx: t.Context(),
ctx: ctx,
}
t.Run(tt.name, func(t *testing.T) {
if got := r.shouldReSync(); got != tt.want {
@@ -180,7 +180,7 @@ type Service struct {
slasherEnabled bool
lcStore *lightClient.Store
dataColumnLogCh chan dataColumnLogEntry
registeredNetworkEntry params.NetworkScheduleEntry
digestActions perDigestSet
subscriptionSpawner func(func()) // see Service.spawn for details
}

@@ -377,10 +377,13 @@ func (s *Service) waitForChainStart() {
}
s.ctxMap = ctxMap

// Register respective rpc handlers at state initialized event.
err = s.registerRPCHandlers()
if err != nil {
log.WithError(err).Error("Could not register rpc handlers")
// We need to register RPC handlers ASAP so that we can handle incoming status message
// requests from peers.
nse := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
if err := s.registerRPCHandlers(nse); err != nil {
// If we fail here, we won't be able to peer with anyone because we can't handle their status messages.
log.WithError(err).Error("Failed to register RPC handlers")
// TODO: need ability to bubble the error up to the top of the node init tree and exit safely.
return
}

@@ -401,22 +404,8 @@ func (s *Service) startDiscoveryAndSubscriptions() {
return
}

// Compute the current epoch.
currentSlot := slots.CurrentSlot(s.cfg.clock.GenesisTime())
currentEpoch := slots.ToEpoch(currentSlot)

// Compute the current fork forkDigest.
forkDigest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
return
}

// Register respective pubsub handlers at state synced event.
s.registerSubscribers(currentEpoch, forkDigest)

// Start the fork watcher.
go s.forkWatcher()
go s.p2pHandlerControlLoop()
}

func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
@@ -454,6 +443,15 @@ func (s *Service) chainIsStarted() bool {
return s.chainStarted.IsSet()
}

func (s *Service) waitForInitialSync(ctx context.Context) error {
select {
case <-s.initialSyncComplete:
return nil
case <-ctx.Done():
return ctx.Err()
}
}

// Checker defines a struct which can verify whether a node is currently
// synchronizing a chain with the rest of peers in the network.
type Checker interface {
@@ -55,7 +55,7 @@ type subscribeParameters struct {
topicFormat string
validate wrappedVal
handle subHandler
digest [4]byte
nse params.NetworkScheduleEntry
// getSubnetsToJoin is a function that returns all subnets the node should join.
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
@@ -70,7 +70,7 @@ func (p subscribeParameters) shortTopic() string {
if fmtLen >= 3 && short[fmtLen-3:] == "_%d" {
short = short[:fmtLen-3]
}
return fmt.Sprintf(short, p.digest)
return fmt.Sprintf(short, p.nse.ForkDigest)
}

func (p subscribeParameters) logFields() logrus.Fields {
@@ -81,7 +81,7 @@ func (p subscribeParameters) logFields() logrus.Fields {

// fullTopic is the fully qualified topic string, given to gossipsub.
func (p subscribeParameters) fullTopic(subnet uint64, suffix string) string {
return fmt.Sprintf(p.topicFormat, p.digest, subnet) + suffix
return fmt.Sprintf(p.topicFormat, p.nse.ForkDigest, subnet) + suffix
}

// subnetTracker keeps track of which subnets we are subscribed to, out of the set of
@@ -204,41 +204,45 @@ func (s *Service) spawn(f func()) {
}

// Register PubSub subscribers
func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
// If we have already registered for this fork digest, exit early.
if s.digestActionDone(nse.ForkDigest, registerGossipOnce) {
return false
}
s.spawn(func() {
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, digest)
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, nse)
})
s.spawn(func() {
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, digest)
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, nse)
})
s.spawn(func() {
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, digest)
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, nse)
})
s.spawn(func() {
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, digest)
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, nse)
})
s.spawn(func() {
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, digest)
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, nse)
})
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.AttestationSubnetTopicFormat,
validate: s.validateCommitteeIndexBeaconAttestation,
handle: s.committeeIndexBeaconAttestationSubscriber,
digest: digest,
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
getSubnetsRequiringPeers: attesterSubnetIndices,
nse: nse,
})
})

// New gossip topic in Altair
if params.BeaconConfig().AltairForkEpoch <= epoch {
if params.BeaconConfig().AltairForkEpoch <= nse.Epoch {
s.spawn(func() {
s.subscribe(
p2p.SyncContributionAndProofSubnetTopicFormat,
s.validateSyncContributionAndProof,
s.syncContributionAndProofSubscriber,
digest,
nse,
)
})
s.spawn(func() {
@@ -246,8 +250,8 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
validate: s.validateSyncCommitteeMessage,
handle: s.syncCommitteeMessageSubscriber,
digest: digest,
getSubnetsToJoin: s.activeSyncSubnetIndices,
nse: nse,
})
})

@@ -257,7 +261,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
p2p.LightClientOptimisticUpdateTopicFormat,
s.validateLightClientOptimisticUpdate,
noopHandler,
digest,
nse,
)
})
s.spawn(func() {
@@ -265,32 +269,32 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
p2p.LightClientFinalityUpdateTopicFormat,
s.validateLightClientFinalityUpdate,
noopHandler,
digest,
nse,
)
})
}
}

// New gossip topic in Capella
if params.BeaconConfig().CapellaForkEpoch <= epoch {
if params.BeaconConfig().CapellaForkEpoch <= nse.Epoch {
s.spawn(func() {
s.subscribe(
p2p.BlsToExecutionChangeSubnetTopicFormat,
s.validateBlsToExecutionChange,
s.blsToExecutionChangeSubscriber,
digest,
nse,
)
})
}

// New gossip topic in Deneb, removed in Electra
if params.BeaconConfig().DenebForkEpoch <= epoch && epoch < params.BeaconConfig().ElectraForkEpoch {
if params.BeaconConfig().DenebForkEpoch <= nse.Epoch && nse.Epoch < params.BeaconConfig().ElectraForkEpoch {
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.BlobSubnetTopicFormat,
validate: s.validateBlob,
handle: s.blobSubscriber,
digest: digest,
nse: nse,
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
},
@@ -299,13 +303,13 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
}

// New gossip topic in Electra, removed in Fulu
if params.BeaconConfig().ElectraForkEpoch <= epoch && epoch < params.BeaconConfig().FuluForkEpoch {
if params.BeaconConfig().ElectraForkEpoch <= nse.Epoch && nse.Epoch < params.BeaconConfig().FuluForkEpoch {
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.BlobSubnetTopicFormat,
validate: s.validateBlob,
handle: s.blobSubscriber,
digest: digest,
nse: nse,
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
},
@@ -314,35 +318,54 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
}

// New gossip topic in Fulu.
if params.BeaconConfig().FuluForkEpoch <= epoch {
if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
s.spawn(func() {
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.DataColumnSubnetTopicFormat,
validate: s.validateDataColumn,
handle: s.dataColumnSubscriber,
digest: digest,
nse: nse,
getSubnetsToJoin: s.dataColumnSubnetIndices,
getSubnetsRequiringPeers: s.allDataColumnSubnets,
})
})
}
return true
}

func (s *Service) subscriptionRequestExpired(nse params.NetworkScheduleEntry) bool {
next := params.NextNetworkScheduleEntry(nse.Epoch)
return next.Epoch != nse.Epoch && s.cfg.clock.CurrentEpoch() > next.Epoch
}

func (s *Service) subscribeLogFields(topic string, nse params.NetworkScheduleEntry) logrus.Fields {
return logrus.Fields{
"topic": topic,
"digest": nse.ForkDigest,
"forkEpoch": nse.Epoch,
"currentEpoch": s.cfg.clock.CurrentEpoch(),
}
}

// subscribe to a given topic with a given validator and subscription handler.
// The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
<-s.initialSyncComplete
_, e, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition as it would mean digest does not exist.
panic(err) // lint:nopanic -- Impossible condition.
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, nse params.NetworkScheduleEntry) {
if err := s.waitForInitialSync(s.ctx); err != nil {
log.WithFields(s.subscribeLogFields(topic, nse)).WithError(err).Debug("Context cancelled while waiting for initial sync, not subscribing to topic")
return
}
base := p2p.GossipTopicMappings(topic, e)
// Check if this subscribe request is still valid - we may have crossed another fork epoch while waiting for initial sync.
if s.subscriptionRequestExpired(nse) {
// If we are already past the next fork epoch, do not subscribe to this topic.
log.WithFields(s.subscribeLogFields(topic, nse)).Debug("Not subscribing to topic as we are already past the next fork epoch")
return
}
base := p2p.GossipTopicMappings(topic, nse.Epoch)
if base == nil {
// Impossible condition as it would mean topic does not exist.
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
}
s.subscribeWithBase(s.addDigestToTopic(topic, digest), validator, handle)
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
}

func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
@@ -352,7 +375,7 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
// Do not resubscribe already seen subscriptions.
ok := s.subHandler.topicExists(topic)
if ok {
log.WithField("topic", topic).Debug("Provided topic already has an active subscription running")
log.WithField("topic", topic).Error("Provided topic already has an active subscription running")
return nil
}
@@ -504,89 +527,93 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
}
}

// pruneSubscriptions unsubscribes from topics we are currently subscribed to but that are
// pruneNotWanted unsubscribes from topics we are currently subscribed to but that are
// not in the list of wanted subnets.
// This function mutates the `subscriptionBySubnet` map, which is used to keep track of the current subscriptions.
func (s *Service) pruneSubscriptions(t *subnetTracker, wantedSubnets map[uint64]bool) {
func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
for _, subnet := range t.unwanted(wantedSubnets) {
t.cancelSubscription(subnet)
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
}
}

// subscribeToSubnets subscribes to needed subnets and unsubscribe from unneeded ones.
// This functions mutates the `subscriptionBySubnet` map, which is used to keep track of the current subscriptions.
func (s *Service) subscribeToSubnets(t *subnetTracker) error {
// Do not subscribe if not synced.
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
return nil
}

valid, err := isDigestValid(t.digest, s.cfg.clock)
if err != nil {
return errors.Wrap(err, "is digest valid")
}

// Unsubscribe from all subnets if digest is not valid. It's likely to be the case after a hard fork.
if !valid {
s.pruneSubscriptions(t, nil)
return errInvalidDigest
}

subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneSubscriptions(t, subnetsToJoin)
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
}

return nil
}

// subscribeWithParameters subscribes to a list of subnets.
func (s *Service) subscribeWithParameters(p subscribeParameters) {
ctx, cancel := context.WithCancel(s.ctx)
defer cancel()

tracker := newSubnetTracker(p)
// Try once immediately so we don't have to wait until the next slot.
s.ensureSubnetPeersAndSubscribe(tracker)

go s.logMinimumPeersPerSubnet(p)
go s.ensurePeers(ctx, tracker)
go s.logMinimumPeersPerSubnet(ctx, p)

if err := s.waitForInitialSync(ctx); err != nil {
log.WithFields(p.logFields()).WithError(err).Debug("Could not subscribe to subnets as initial sync failed")
return
}
s.trySubscribeSubnets(tracker)
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
defer slotTicker.Done()
for {
select {
case <-slotTicker.C():
s.ensureSubnetPeersAndSubscribe(tracker)
// Check if this subscribe request is still valid - we may have crossed another fork epoch while waiting for initial sync.
if s.subscriptionRequestExpired(p.nse) {
// If we are already past the next fork epoch, do not subscribe to this topic.
log.WithFields(logrus.Fields{
"topic": p.shortTopic(),
"digest": p.nse.ForkDigest,
"epoch": p.nse.Epoch,
"currentEpoch": s.cfg.clock.CurrentEpoch(),
}).Debug("Exiting topic subnet subscription loop")
return
}
s.trySubscribeSubnets(tracker)
case <-s.ctx.Done():
return
}
}
}

func (s *Service) ensureSubnetPeersAndSubscribe(tracker *subnetTracker) {
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
minPeers := flags.Get().MinimumPeersPerSubnet
logFields := tracker.logFields()
neededSubnets := computeAllNeededSubnets(s.cfg.clock.CurrentSlot(), tracker.getSubnetsToJoin, tracker.getSubnetsRequiringPeers)

if err := s.subscribeToSubnets(tracker); err != nil {
if errors.Is(err, errInvalidDigest) {
log.WithFields(logFields).Debug("Digest is invalid, stopping subscription")
return
}
log.WithFields(logFields).WithError(err).Error("Could not subscribe to subnets")
return
}

ctx, cancel := context.WithTimeout(s.ctx, timeout)
defer cancel()
if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, tracker.topicFormat, tracker.digest, minPeers, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithFields(logFields).WithError(err).Debug("Could not find peers with subnets")
// trySubscribeSubnets attempts to subscribe to any missing subnets that we should be subscribed to.
// Only if initial sync is complete.
func (s *Service) trySubscribeSubnets(t *subnetTracker) {
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneNotWanted(t, subnetsToJoin)
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
}
}

func (s *Service) logMinimumPeersPerSubnet(p subscribeParameters) {
func (s *Service) ensurePeers(ctx context.Context, tracker *subnetTracker) {
// Try once immediately so we don't have to wait until the next slot.
s.tryEnsurePeers(ctx, tracker)

oncePerSlot := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
defer oncePerSlot.Done()
for {
select {
case <-oncePerSlot.C():
s.tryEnsurePeers(ctx, tracker)
case <-ctx.Done():
return
}
}
}

func (s *Service) tryEnsurePeers(ctx context.Context, tracker *subnetTracker) {
timeout := (time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) - 100*time.Millisecond
minPeers := flags.Get().MinimumPeersPerSubnet
neededSubnets := computeAllNeededSubnets(s.cfg.clock.CurrentSlot(), tracker.getSubnetsToJoin, tracker.getSubnetsRequiringPeers)
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, tracker.topicFormat, tracker.nse.ForkDigest, minPeers, neededSubnets)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithFields(tracker.logFields()).WithError(err).Debug("Could not find peers with subnets")
}
}

func (s *Service) logMinimumPeersPerSubnet(ctx context.Context, p subscribeParameters) {
logFields := p.logFields()
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
// Warn the user if we are not subscribed to enough peers in the subnets.
@@ -603,7 +630,7 @@ func (s *Service) logMinimumPeersPerSubnet(p subscribeParameters) {
isSubnetWithMissingPeers := false
// Find new peers for wanted subnets if needed.
for index := range subnetsToFindPeersIndex {
topic := fmt.Sprintf(p.topicFormat, p.digest, index)
topic := fmt.Sprintf(p.topicFormat, p.nse.ForkDigest, index)

// Check if we have enough peers in the subnet. Skip if we do.
if count := s.connectedPeersCount(topic); count < minimumPeersPerSubnet {
@@ -617,7 +644,7 @@ func (s *Service) logMinimumPeersPerSubnet(p subscribeParameters) {
if !isSubnetWithMissingPeers {
log.WithFields(logFields).Debug("All subnets have enough connected peers")
}
case <-s.ctx.Done():
case <-ctx.Done():
return
}
}
@@ -680,7 +707,7 @@ func (s *Service) samplingSize() (uint64, error) {
return 0, errors.Wrap(err, "validators custody requirement")
}

custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
if err != nil {
return 0, errors.Wrap(err, "custody group count")
}
@@ -785,7 +812,7 @@ func isDigestValid(digest [4]byte, clock *startup.Clock) (bool, error) {
// In the event there is a fork the next epoch,
// we skip the check, as we subscribe subnets an
// epoch in advance.
if params.DigestChangesAfter(current) {
if params.NextNetworkScheduleEntry(current).Epoch == current+1 {
return true, nil
}
return params.ForkDigest(current) == digest, nil

@@ -299,7 +299,7 @@ func (s *Service) columnIndicesToSample() (map[uint64]bool, error) {
nodeID := s.cfg.p2p.NodeID()

// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount(s.ctx)
if err != nil {
return nil, errors.Wrap(err, "custody group count")
}
Some files were not shown because too many files have changed in this diff.