Compare commits


6 Commits

Author SHA1 Message Date
satushh c603321733 initialise registeredNetworkEntry in startDiscoveryAndSubscriptions (by james) 2025-09-29 18:59:57 +01:00
satushh 8f816d6f49 Track existing subscriptions to prevent repeated attempts 2025-09-29 13:42:03 +01:00
satushh 0a8603268b consider existing case of topic 2025-09-28 21:31:48 +01:00
satushh 6fd2d5f268 bazel run //:gazelle -- fix 2025-09-28 21:11:42 +01:00
satushh 8056f55522 fix race condition and proper removal of topic 2025-09-28 21:06:53 +01:00
satushh 9ab42d18da tests to check race condition 2025-09-28 20:55:13 +01:00
81 changed files with 1251 additions and 942 deletions

View File

@@ -34,7 +34,6 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1
build:release --strip=always
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g

View File

@@ -9,7 +9,7 @@ on:
jobs:
run-changelog-check:
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout source code
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0

View File

@@ -3,7 +3,7 @@ on: [push, pull_request]
jobs:
check-specrefs:
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout repository

View File

@@ -10,7 +10,7 @@ on:
jobs:
clang-format-checking:
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Is this step failing for you?

View File

@@ -10,7 +10,7 @@ permissions:
jobs:
list:
runs-on: ubuntu-4
runs-on: ubuntu-latest
timeout-minutes: 180
steps:
- uses: actions/checkout@v3
@@ -25,7 +25,7 @@ jobs:
fuzz-tests: ${{steps.list.outputs.fuzz-tests}}
fuzz:
runs-on: ubuntu-4
runs-on: ubuntu-latest
timeout-minutes: 360
needs: list
strategy:

View File

@@ -11,7 +11,7 @@ on:
jobs:
formatting:
name: Formatting
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -22,7 +22,7 @@ jobs:
gosec:
name: Gosec scan
runs-on: ubuntu-4
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
@@ -40,7 +40,7 @@ jobs:
lint:
name: Lint
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -59,7 +59,7 @@ jobs:
build:
name: Build
runs-on: ubuntu-4
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.25.1
uses: actions/setup-go@v4

View File

@@ -8,7 +8,7 @@ on:
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-4
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code
@@ -19,4 +19,4 @@ jobs:
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

View File

@@ -109,7 +109,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
}
// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
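The comment above documents the precondition that the two slices line up; a minimal caller-side sketch of that check, assuming the kzg package shown in this diff (the wrapper name is illustrative, not Prysm code):

package kzgutil

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
)

// recoverChecked enforces the documented requirement that
// len(cellIndices) == len(partialCells) before delegating to
// kzg.RecoverCellsAndKZGProofs. Illustrative wrapper only.
func recoverChecked(cellIndices []uint64, partialCells []kzg.Cell) (kzg.CellsAndProofs, error) {
	if len(cellIndices) != len(partialCells) {
		return kzg.CellsAndProofs{}, fmt.Errorf("mismatched lengths: %d indices, %d cells", len(cellIndices), len(partialCells))
	}
	return kzg.RecoverCellsAndKZGProofs(cellIndices, partialCells)
}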

View File

@@ -72,13 +72,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
// Track whether block processing succeeded to only send event on success
var blockProcessed bool
defer func() {
if blockProcessed {
s.sendStateFeedOnBlock(cfg)
}
}()
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.roblock.Block())
@@ -107,22 +101,16 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if cfg.headRoot != cfg.roblock.Root() {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
// Mark as processed even for non-canonical blocks that succeed
blockProcessed = true
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
// Mark as processed - block was successfully inserted even if FCU args failed
blockProcessed = true
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
// Block successfully processed
blockProcessed = true
return nil
}
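The hunk above swaps a flag-guarded deferred event for an unconditional defer. A standalone sketch of the flag-guarded pattern, with illustrative names rather than the actual Service wiring:

package main

import "fmt"

// process gates a deferred notification on a success flag: the notification
// fires only on paths that explicitly mark success before returning.
func process(doWork func() error) error {
	var succeeded bool
	defer func() {
		if succeeded {
			fmt.Println("event sent") // stands in for sendStateFeedOnBlock
		}
	}()
	if err := doWork(); err != nil {
		return err // flag never set, so no event
	}
	succeeded = true
	return nil
}

func main() {
	_ = process(func() error { return nil })
}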

View File

@@ -9,10 +9,8 @@ import (
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
@@ -3464,156 +3462,3 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
}
}
}
// Test_postBlockProcess_EventSending tests that block processed events are only sent
// when block processing succeeds according to the decision tree:
//
// Block Processing Flow:
// ├─ InsertNode FAILS (fork choice timeout)
// │ └─ blockProcessed = false ❌ NO EVENT
// │
// ├─ InsertNode succeeds
// │ ├─ handleBlockAttestations FAILS
// │ │ └─ blockProcessed = false ❌ NO EVENT
// │ │
// │ ├─ Block is NON-CANONICAL (not head)
// │ │ └─ blockProcessed = true ✅ SEND EVENT (Line 111)
// │ │
// │ ├─ Block IS CANONICAL (new head)
// │ │ ├─ getFCUArgs FAILS
// │ │ │ └─ blockProcessed = true ✅ SEND EVENT (Line 117)
// │ │ │
// │ │ ├─ sendFCU FAILS
// │ │ │ └─ blockProcessed = false ❌ NO EVENT
// │ │ │
// │ │ └─ Full success
// │ │ └─ blockProcessed = true ✅ SEND EVENT (Line 125)
func Test_postBlockProcess_EventSending(t *testing.T) {
ctx := context.Background()
// Helper to create a minimal valid block and state
createTestBlockAndState := func(t *testing.T, slot primitives.Slot, parentRoot [32]byte) (consensusblocks.ROBlock, state.BeaconState) {
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(slot))
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)
blk := util.NewBeaconBlock()
blk.Block.Slot = slot
blk.Block.ProposerIndex = 0
blk.Block.ParentRoot = parentRoot[:]
blk.Block.StateRoot = stateRoot[:]
signed := util.HydrateSignedBeaconBlock(blk)
roBlock, err := consensusblocks.NewSignedBeaconBlock(signed)
require.NoError(t, err)
roBlk, err := consensusblocks.NewROBlock(roBlock)
require.NoError(t, err)
return roBlk, st
}
tests := []struct {
name string
setupService func(*Service, [32]byte)
expectEvent bool
expectError bool
errorContains string
}{
{
name: "Block successfully processed - sends event",
setupService: func(s *Service, blockRoot [32]byte) {
// Default setup should work
},
expectEvent: true,
expectError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create service with required options
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
// Initialize fork choice with genesis block
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(0))
genesisBlock := util.NewBeaconBlock()
genesisBlock.Block.StateRoot = bytesutil.PadTo([]byte("genesisState"), 32)
signedGenesis := util.HydrateSignedBeaconBlock(genesisBlock)
block, err := consensusblocks.NewSignedBeaconBlock(signedGenesis)
require.NoError(t, err)
genesisRoot, err := block.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, block))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, genesisRoot))
genesisROBlock, err := consensusblocks.NewROBlock(block)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, genesisROBlock))
// Create test block and state with genesis as parent
roBlock, postSt := createTestBlockAndState(t, 100, genesisRoot)
// Apply additional service setup if provided
if tt.setupService != nil {
tt.setupService(service, roBlock.Root())
}
// Create post block process config
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roBlock,
postState: postSt,
isValidPayload: true,
}
// Execute postBlockProcess
err = service.postBlockProcess(cfg)
// Check error expectation
if tt.expectError {
require.NotNil(t, err)
if tt.errorContains != "" {
require.ErrorContains(t, tt.errorContains, err)
}
} else {
require.NoError(t, err)
}
// Give a moment for deferred functions to execute
time.Sleep(10 * time.Millisecond)
// Check event expectation
notifier := service.cfg.StateNotifier.(*mock.MockStateNotifier)
events := notifier.ReceivedEvents()
if tt.expectEvent {
require.NotEqual(t, 0, len(events), "Expected event to be sent but none were received")
// Verify it's a BlockProcessed event
foundBlockProcessed := false
for _, evt := range events {
if evt.Type == statefeed.BlockProcessed {
foundBlockProcessed = true
data, ok := evt.Data.(*statefeed.BlockProcessedData)
require.Equal(t, true, ok, "Event data should be BlockProcessedData")
require.Equal(t, roBlock.Root(), data.BlockRoot, "Event should contain correct block root")
break
}
}
require.Equal(t, true, foundBlockProcessed, "Expected BlockProcessed event type")
} else {
// For no-event cases, verify no BlockProcessed events were sent
for _, evt := range events {
require.NotEqual(t, statefeed.BlockProcessed, evt.Type,
"Expected no BlockProcessed event but one was sent")
}
}
})
}
}

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -42,9 +42,6 @@ func ProcessAttesterSlashings(
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process attester slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
@@ -62,9 +59,6 @@ func ProcessAttesterSlashing(
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil {
return nil, errors.New("exit info is required to process attester slashing")
}
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
}

View File

@@ -55,9 +55,6 @@ func ProcessVoluntaryExits(
if len(exits) == 0 {
return beaconState, nil
}
if exitInfo == nil {
return nil, errors.New("exit info required to process voluntary exits")
}
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")

View File

@@ -51,9 +51,6 @@ func ProcessProposerSlashings(
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process proposer slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
@@ -78,9 +75,6 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)

View File

@@ -53,15 +53,9 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
var exitInfo *v.ExitInfo
hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
hasExits := len(bb.VoluntaryExits()) > 0
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
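The removed comment notes that ExitInformation is expensive and worth computing only when the block carries slashings or exits. A condensed sketch of that lazy-computation shape, with a stand-in type rather than validators.ExitInfo:

package main

// exitInfo is a stand-in for validators.ExitInfo in this sketch.
type exitInfo struct {
	TotalActiveBalance uint64
}

// lazyExitInfo computes exit information only when at least one operation
// in the block needs it, mirroring the conditional variant above.
func lazyExitInfo(numSlashings, numExits int, compute func() *exitInfo) *exitInfo {
	if numSlashings == 0 && numExits == 0 {
		return nil // skip the expensive computation entirely
	}
	return compute()
}

func main() {
	_ = lazyExitInfo(1, 0, func() *exitInfo { return &exitInfo{TotalActiveBalance: 32_000_000_000} })
}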

View File

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -92,18 +91,6 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
if len(wrs) == 0 {
return st, nil
}
// It is correct to compute exitInfo once for all withdrawals in the block, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
var exitInfo *validators.ExitInfo
if st.Version() < version.Electra {
exitInfo = validators.ExitInformation(st)
} else {
// After Electra, the function InitiateValidatorExit ignores the exitInfo passed to it and recomputes it anyway.
exitInfo = &validators.ExitInfo{}
}
for _, wr := range wrs {
if wr == nil {
return nil, errors.New("nil execution layer withdrawal request")
@@ -161,8 +148,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
var err error
// exitInfo is updated within InitiateValidatorExit
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, exitInfo)
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
if err != nil {
return nil, err
}

View File

@@ -96,17 +96,12 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
}
// Process validators eligible for ejection.
if len(eligibleForEjection) > 0 {
// It is safe to compute exitInfo once for all ejections in the epoch, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
exitInfo := validators.ExitInformation(st)
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
}

View File

@@ -399,6 +399,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
defer span.End()
// Verify if the epoch is valid for assignment based on the provided state.
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
@@ -406,15 +407,12 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
if err != nil {
return nil, err
}
// Deduplicate and make set for O(1) membership checks.
vals := make(map[primitives.ValidatorIndex]struct{}, len(validators))
vals := make(map[primitives.ValidatorIndex]struct{})
for _, v := range validators {
vals[v] = struct{}{}
}
remaining := len(vals)
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment, len(vals))
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
// Compute committee assignments for each slot in the epoch.
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
committees, err := BeaconCommittees(ctx, state, slot)
if err != nil {
@@ -422,7 +420,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
}
for j, committee := range committees {
for _, vIndex := range committee {
if _, ok := vals[vIndex]; !ok {
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
continue
}
if _, ok := assignments[vIndex]; !ok {
@@ -431,11 +429,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
delete(vals, vIndex)
remaining--
if remaining == 0 {
return assignments, nil // early exit
}
}
}
}
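One variant above deduplicates the requested validators into a set and exits the slot/committee loops as soon as every requested validator has an assignment. A self-contained sketch of that early-exit shape, with plain uint64 indices instead of the primitives types:

package main

import "fmt"

// assignFirstMatch records the first committee position found for each
// requested validator and returns early once every requested validator has
// been seen, so the remaining slots are never scanned.
func assignFirstMatch(committeesBySlot [][][]uint64, requested []uint64) map[uint64]int {
	wanted := make(map[uint64]struct{}, len(requested))
	for _, v := range requested {
		wanted[v] = struct{}{}
	}
	remaining := len(wanted)
	assignments := make(map[uint64]int, len(wanted))
	for _, committees := range committeesBySlot {
		for _, committee := range committees {
			for pos, v := range committee {
				if _, ok := wanted[v]; !ok {
					continue
				}
				if _, done := assignments[v]; done {
					continue
				}
				assignments[v] = pos
				remaining--
				if remaining == 0 {
					return assignments // early exit: nothing left to find
				}
			}
		}
	}
	return assignments
}

func main() {
	bySlot := [][][]uint64{{{1, 2, 3}}, {{4, 5, 6}}}
	fmt.Println(assignFirstMatch(bySlot, []uint64{2, 5}))
}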

View File

@@ -1,8 +1,6 @@
package peerdas
import (
"sort"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -30,8 +28,7 @@ func MinimumColumnCountToReconstruct() uint64 {
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
// `inVerifiedRoSidecars` should contain enough sidecars to reconstruct the missing columns, and should not contain any duplicate.
// WARNING: This function sorts inplace `verifiedRoSidecars` by index.
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
// Check if there is at least one input sidecar.
if len(verifiedRoSidecars) == 0 {
@@ -54,17 +51,18 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
}
}
// Deduplicate sidecars.
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
for _, inVerifiedRoSidecar := range verifiedRoSidecars {
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
}
// Check if there is enough sidecars to reconstruct the missing columns.
sidecarCount := len(verifiedRoSidecars)
sidecarCount := len(sidecarByIndex)
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}
// Sort the input sidecars by index.
sort.Slice(verifiedRoSidecars, func(i, j int) bool {
return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
})
// Recover cells and compute proofs in parallel.
var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -73,10 +71,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range verifiedRoSidecars {
for columnIndex, sidecar := range sidecarByIndex {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, sidecar.Index)
cellsIndices = append(cellsIndices, columnIndex)
}
// Recover the cells and proofs for the corresponding blob
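The reworked path above replaces the in-place sort with a map keyed by column index, which deduplicates the inputs and drops the ordering requirement in one step. A condensed sketch of that dedup-and-count check, with a simplified sidecar type:

package main

import "fmt"

// sidecar is a stand-in carrying only the column index, which is all the
// deduplication step needs.
type sidecar struct {
	Index uint64
}

// dedupByIndex keeps one sidecar per column index, so duplicates no longer
// inflate the count used to decide whether reconstruction is possible.
func dedupByIndex(in []sidecar, minimumCount uint64) (map[uint64]sidecar, error) {
	byIndex := make(map[uint64]sidecar, len(in))
	for _, sc := range in {
		byIndex[sc.Index] = sc
	}
	if uint64(len(byIndex)) < minimumCount {
		return nil, fmt.Errorf("not enough unique sidecars: have %d, need %d", len(byIndex), minimumCount)
	}
	return byIndex, nil
}

func main() {
	got, err := dedupByIndex([]sidecar{{Index: 1}, {Index: 1}, {Index: 2}}, 2)
	fmt.Println(len(got), err)
}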

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -379,16 +378,9 @@ func ProcessBlockForStateRoot(
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
// exitInfo is only needed for voluntary exits pre Electra.
hasExits := st.Version() < version.Electra && len(beaconBlock.Body().VoluntaryExits()) > 0
exitInfo := &validators.ExitInfo{}
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
@@ -415,15 +407,10 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
// This calls phase 0 block operations.
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
var exitInfo *v.ExitInfo
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {

View File

@@ -98,9 +98,7 @@ func InitiateValidatorExit(
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}
if exitInfo == nil {
return nil, errors.New("exit info is required to process validator exit")
}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
@@ -179,9 +177,6 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitInfo == nil {
return errors.New("exit info is required to process validator exit")
}
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
@@ -240,9 +235,7 @@ func SlashValidator(
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error
if exitInfo == nil {
return nil, errors.New("exit info is required to slash validator")
}
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)

View File

@@ -254,7 +254,6 @@ func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaco
return updatesByPeriod, nil
}
// SetLastFinalityUpdate should be used only for testing.
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -284,7 +283,6 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
return s.lastFinalityUpdate
}
// SetLastOptimisticUpdate should be used only for testing.
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

View File

@@ -162,7 +162,6 @@ go_test(
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",

View File

@@ -5,18 +5,14 @@ import (
"context"
"fmt"
"reflect"
"slices"
"sync"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
@@ -310,150 +306,86 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return nil
}
// BroadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
// cancelled (whichever comes first).
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
func (s *Service) BroadcastDataColumnSidecar(
dataColumnSubnet uint64,
dataColumnSidecar blocks.VerifiedRODataColumn,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
defer span.End()
// Retrieve the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
return errors.Wrap(err, "current fork digest")
err := errors.Wrap(err, "current fork digest")
tracing.AnnotateError(span, err)
return err
}
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
return nil
}
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
type rootAndIndex struct {
root [fieldparams.RootLength]byte
index uint64
func (s *Service) internalBroadcastDataColumnSidecar(
ctx context.Context,
columnSubnet uint64,
dataColumnSidecar blocks.VerifiedRODataColumn,
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
defer span.End()
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Inc()
// Define a one-slot length context timeout.
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
oneSlot := time.Duration(secondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()
// Build the topic corresponding to this column subnet and this fork digest.
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
// Compute the wrapped subnet index.
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
// Find peers if needed.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, columnSubnet); err != nil {
log.WithError(err).Error("Failed to find peers for data column subnet")
tracing.AnnotateError(span, err)
}
var (
wg sync.WaitGroup
timings sync.Map
)
logLevel := logrus.GetLevel()
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
for _, sidecar := range sidecars {
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
wg.Go(func() {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.broadcastDataColumnSidecars")
defer span.End()
// Compute the subnet for this data column sidecar.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
// Build the topic corresponding to subnet column subnet and this fork digest.
topic := dataColumnSubnetToTopic(subnet, forkDigest)
// Compute the wrapped subnet index.
wrappedSubIdx := subnet + dataColumnSubnetVal
// Find peers if needed.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot find peers if needed")
return
}
// Broadcast the data column sidecar to the network.
if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot broadcast data column sidecar")
return
}
// Increase the number of successful broadcasts.
dataColumnSidecarBroadcasts.Inc()
// Record the timing for log purposes.
if logLevel >= logrus.DebugLevel {
root := sidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
}
})
}
// Wait for all broadcasts to finish.
wg.Wait()
// The rest of this function is only for debug logging purposes.
if logLevel < logrus.DebugLevel {
// Broadcast the data column sidecar to the network.
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
log.WithError(err).Error("Failed to broadcast data column sidecar")
tracing.AnnotateError(span, err)
return
}
type logInfo struct {
durationMin time.Duration
durationMax time.Duration
indices []uint64
header := dataColumnSidecar.SignedBlockHeader.GetHeader()
slot := header.GetSlot()
slotStartTime, err := slots.StartTime(s.genesisTime, slot)
if err != nil {
log.WithError(err).Error("Failed to convert slot to time")
}
logInfoPerRoot := make(map[[fieldparams.RootLength]byte]*logInfo, 1)
log.WithFields(logrus.Fields{
"slot": slot,
"timeSinceSlotStart": time.Since(slotStartTime),
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
"columnSubnet": columnSubnet,
"blobCount": len(dataColumnSidecar.Column),
}).Debug("Broadcasted data column sidecar")
timings.Range(func(key any, value any) bool {
rootAndIndex, ok := key.(rootAndIndex)
if !ok {
log.Error("Could not cast key to rootAndIndex")
return true
}
broadcastTime, ok := value.(time.Time)
if !ok {
log.Error("Could not cast value to time.Time")
return true
}
slot, ok := slotPerRoot[rootAndIndex.root]
if !ok {
log.WithField("root", fmt.Sprintf("%#x", rootAndIndex.root)).Error("Could not find slot for root")
return true
}
duration, err := slots.SinceSlotStart(slot, s.genesisTime, broadcastTime)
if err != nil {
log.WithError(err).Error("Could not compute duration since slot start")
return true
}
info, ok := logInfoPerRoot[rootAndIndex.root]
if !ok {
logInfoPerRoot[rootAndIndex.root] = &logInfo{durationMin: duration, durationMax: duration, indices: []uint64{rootAndIndex.index}}
return true
}
info.durationMin = min(info.durationMin, duration)
info.durationMax = max(info.durationMax, duration)
info.indices = append(info.indices, rootAndIndex.index)
return true
})
for root, info := range logInfoPerRoot {
slices.Sort(info.indices)
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slotPerRoot[root],
"count": len(info.indices),
"indices": helpers.PrettySlice(info.indices),
"timeSinceSlotStartMin": info.durationMin,
"timeSinceSlotStartMax": info.durationMax,
}).Debug("Broadcasted data column sidecars")
}
// Increase the number of successful broadcasts.
dataColumnSidecarBroadcasts.Inc()
}
func (s *Service) findPeersIfNeeded(
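With the single-sidecar signature above, a caller that previously handed over a slice now computes each column subnet itself. A hedged caller-side sketch; the narrow interface and loop are illustrative, while ComputeSubnetForDataColumnSidecar, VerifiedRODataColumn, and the method signature appear elsewhere in this diff:

package broadcastexample

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/pkg/errors"
)

// columnBroadcaster is the narrow interface this sketch needs; it matches
// the BroadcastDataColumnSidecar signature shown above.
type columnBroadcaster interface {
	BroadcastDataColumnSidecar(columnSubnet uint64, sidecar blocks.VerifiedRODataColumn) error
}

// broadcastAll computes the subnet for each verified sidecar and hands it to
// the per-sidecar broadcast method, one call per column.
func broadcastAll(b columnBroadcaster, sidecars []blocks.VerifiedRODataColumn) error {
	for _, sc := range sidecars {
		subnet := peerdas.ComputeSubnetForDataColumnSidecar(sc.Index)
		if err := b.BroadcastDataColumnSidecar(subnet, sc); err != nil {
			return errors.Wrapf(err, "broadcast data column sidecar %d", sc.Index)
		}
	}
	return nil
}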

View File

@@ -15,10 +15,10 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -60,6 +60,7 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest)
@@ -663,8 +664,6 @@ func TestService_BroadcastDataColumn(t *testing.T) {
topicFormat = DataColumnSubnetTopicFormat
)
ctx := t.Context()
// Load the KZG trust setup.
err := kzg.Start()
require.NoError(t, err)
@@ -687,7 +686,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
_, pkey, ipAddr := createHost(t, port)
service := &Service{
ctx: ctx,
ctx: t.Context(),
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
@@ -696,7 +695,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(ctx, &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
custodyInfo: &custodyInfo{},
}
@@ -723,7 +722,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
time.Sleep(50 * time.Millisecond)
// Broadcast to peers and wait.
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
require.NoError(t, err)
// Receive the message.

View File

@@ -52,7 +52,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
BroadcastDataColumnSidecar(columnSubnet uint64, dataColumnSidecar blocks.VerifiedRODataColumn) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -91,6 +92,7 @@ type Service struct {
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
clock *startup.Clock
allForkDigests map[[4]byte]struct{}
}

View File

@@ -169,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
}
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (*FakeP2P) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
return nil
}

View File

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (m *MockBroadcaster) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
m.BroadcastCalled.Store(true)
return nil
}

View File

@@ -233,7 +233,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (p *TestP2P) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
p.BroadcastCalled.Store(true)
return nil
}

View File

@@ -27,7 +27,5 @@ go_test(
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -124,28 +124,14 @@ func convertValueForJSON(v reflect.Value, tag string) interface{} {
if !v.Field(i).CanInterface() {
continue // unexported
}
jsonTag := f.Tag.Get("json")
if jsonTag == "-" {
continue
}
// Parse JSON tag options (e.g., "fieldname,omitempty")
parts := strings.Split(jsonTag, ",")
key := parts[0]
if key == "" {
key := f.Tag.Get("json")
if key == "" || key == "-" {
key = f.Name
}
fieldValue := convertValueForJSON(v.Field(i), tag)
m[key] = fieldValue
m[key] = convertValueForJSON(v.Field(i), tag)
}
return m
// ===== String =====
case reflect.String:
return v.String()
// ===== Default =====
default:
log.WithFields(log.Fields{
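The simpler variant in this hunk reads the raw json tag, while the other variant splits off tag options such as ",omitempty" and skips fields tagged "-". A small self-contained sketch of the tag-aware key resolution:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// jsonKeyForField resolves the output key for a struct field: skip fields
// tagged "-", strip tag options such as ",omitempty", and fall back to the
// Go field name when no tag is present.
func jsonKeyForField(f reflect.StructField) (string, bool) {
	tag := f.Tag.Get("json")
	if tag == "-" {
		return "", false
	}
	key := strings.Split(tag, ",")[0]
	if key == "" {
		key = f.Name
	}
	return key, true
}

func main() {
	type entry struct {
		Epoch    string `json:"EPOCH"`
		Hidden   string `json:"-"`
		MaxBlobs string `json:"MAX_BLOBS_PER_BLOCK,omitempty"`
	}
	t := reflect.TypeOf(entry{})
	for i := 0; i < t.NumField(); i++ {
		if key, ok := jsonKeyForField(t.Field(i)); ok {
			fmt.Println(key) // EPOCH, MAX_BLOBS_PER_BLOCK
		}
	}
}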

View File

@@ -8,7 +8,6 @@ import (
"math"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
@@ -18,8 +17,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
log "github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestGetDepositContract(t *testing.T) {
@@ -692,27 +689,6 @@ func TestGetSpec_BlobSchedule(t *testing.T) {
// Check second entry - values should be strings for consistent API output
assert.Equal(t, "200", blobSchedule[1]["EPOCH"])
assert.Equal(t, "9", blobSchedule[1]["MAX_BLOBS_PER_BLOCK"])
// Verify that fields with json:"-" are NOT present in the blob schedule entries
for i, entry := range blobSchedule {
t.Run(fmt.Sprintf("entry_%d_omits_json_dash_fields", i), func(t *testing.T) {
// These fields have `json:"-"` in NetworkScheduleEntry and should be omitted
_, hasForkVersion := entry["ForkVersion"]
assert.Equal(t, false, hasForkVersion, "ForkVersion should be omitted due to json:\"-\"")
_, hasForkDigest := entry["ForkDigest"]
assert.Equal(t, false, hasForkDigest, "ForkDigest should be omitted due to json:\"-\"")
_, hasBPOEpoch := entry["BPOEpoch"]
assert.Equal(t, false, hasBPOEpoch, "BPOEpoch should be omitted due to json:\"-\"")
_, hasVersionEnum := entry["VersionEnum"]
assert.Equal(t, false, hasVersionEnum, "VersionEnum should be omitted due to json:\"-\"")
_, hasIsFork := entry["isFork"]
assert.Equal(t, false, hasIsFork, "isFork should be omitted due to json:\"-\"")
})
}
}
func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
@@ -739,35 +715,3 @@ func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
_, exists := data["BLOB_SCHEDULE"]
require.Equal(t, false, exists)
}
func TestConvertValueForJSON_NoErrorLogsForStrings(t *testing.T) {
logHook := logTest.NewLocal(log.StandardLogger())
defer logHook.Reset()
stringTestCases := []struct {
tag string
value string
}{
{"CONFIG_NAME", "mainnet"},
{"PRESET_BASE", "mainnet"},
{"DEPOSIT_CONTRACT_ADDRESS", "0x00000000219ab540356cBB839Cbe05303d7705Fa"},
{"TERMINAL_TOTAL_DIFFICULTY", "58750000000000000000000"},
}
for _, tc := range stringTestCases {
t.Run(tc.tag, func(t *testing.T) {
logHook.Reset()
// Convert the string value
v := reflect.ValueOf(tc.value)
result := convertValueForJSON(v, tc.tag)
// Verify the result is correct
require.Equal(t, tc.value, result)
// Verify NO error was logged about unsupported field kind
require.LogsDoNotContain(t, logHook, "Unsupported config field kind")
require.LogsDoNotContain(t, logHook, "kind=string")
})
}
}

View File

@@ -68,11 +68,7 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
Code: http.StatusInternalServerError,
}
}
var exitInfo *validators.ExitInfo
if len(blk.Body().ProposerSlashings()) > 0 || len(blk.Body().AttesterSlashings()) > 0 {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = validators.ExitInformation(st)
}
exitInfo := validators.ExitInformation(st)
st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, &httputil.DefaultJsonError{

View File

@@ -690,10 +690,6 @@ func (s *Server) ProduceSyncCommitteeContribution(w http.ResponseWriter, r *http
if !ok {
return
}
if index >= params.BeaconConfig().SyncCommitteeSubnetCount {
httputil.HandleError(w, fmt.Sprintf("Subcommittee index needs to be between 0 and %d, %d is outside of this range.", params.BeaconConfig().SyncCommitteeSubnetCount-1, index), http.StatusBadRequest)
return
}
_, slot, ok := shared.UintFromQuery(w, r, "slot", true)
if !ok {
return
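The removed block validated the subcommittee index against SyncCommitteeSubnetCount before use. A standalone sketch of that bounds check; subnetCount stands in for params.BeaconConfig().SyncCommitteeSubnetCount (4 on mainnet, matching the removed test's "between 0 and 3" message):

package main

import "fmt"

// validateSubcommitteeIndex mirrors the removed handler check: the index
// must fall in [0, subnetCount).
func validateSubcommitteeIndex(index, subnetCount uint64) error {
	if index >= subnetCount {
		return fmt.Errorf("subcommittee index needs to be between 0 and %d, %d is outside of this range", subnetCount-1, index)
	}
	return nil
}

func main() {
	fmt.Println(validateSubcommitteeIndex(10, 4))
	fmt.Println(validateSubcommitteeIndex(2, 4))
}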

View File

@@ -2117,27 +2117,6 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
server.ProduceSyncCommitteeContribution(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
})
t.Run("invalid subcommittee_index", func(t *testing.T) {
url := "http://example.com?slot=1&subcommittee_index=10&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
request := httptest.NewRequest(http.MethodGet, url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
// Use non-optimistic server for this test
server := Server{
CoreService: &core.Service{
HeadFetcher: &mockChain.ChainService{
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
},
SyncCommitteePool: syncCommitteePool,
OptimisticModeFetcher: &mockChain.ChainService{}, // Optimistic: false by default
}
server.ProduceSyncCommitteeContribution(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
require.ErrorContains(t, "Subcommittee index needs to be between 0 and 3, 10 is outside of this range.", errors.New(writer.Body.String()))
})
}
func TestServer_RegisterValidator(t *testing.T) {

View File

@@ -17,6 +17,13 @@ import (
"google.golang.org/grpc/status"
)
const (
// validatorLookupThreshold determines when to use full assignment map vs cached linear search.
// For requests with fewer validators, we use cached linear search to avoid the overhead
// of building a complete assignment map for all validators in the epoch.
validatorLookupThreshold = 3000
)
// GetDutiesV2 returns the duties assigned to a list of validators specified
// in the request object.
//
@@ -53,26 +60,7 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
span.SetAttributes(trace.Int64Attribute("num_pubkeys", int64(len(req.PublicKeys))))
defer span.End()
// Collect validator indices from public keys and cache the lookups
type validatorInfo struct {
index primitives.ValidatorIndex
found bool
}
validatorLookup := make(map[string]validatorInfo, len(req.PublicKeys))
requestIndices := make([]primitives.ValidatorIndex, 0, len(req.PublicKeys))
for _, pubKey := range req.PublicKeys {
key := string(pubKey)
if _, exists := validatorLookup[key]; !exists {
idx, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
validatorLookup[key] = validatorInfo{index: idx, found: ok}
if ok {
requestIndices = append(requestIndices, idx)
}
}
}
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, requestIndices)
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, len(req.PublicKeys))
if err != nil {
return nil, err
}
@@ -80,14 +68,14 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
validatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
nextValidatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
// Build duties using cached validator index lookups
// start loop for assignments for current and next epochs
for _, pubKey := range req.PublicKeys {
if ctx.Err() != nil {
return nil, status.Errorf(codes.Aborted, "Could not continue fetching assignments: %v", ctx.Err())
}
info := validatorLookup[string(pubKey)]
if !info.found {
validatorIndex, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
unknownDuty := &ethpb.DutiesV2Response_Duty{
PublicKey: pubKey,
Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
@@ -97,15 +85,16 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
continue
}
currentAssignment := vs.getValidatorAssignment(meta.current, info.index)
nextAssignment := vs.getValidatorAssignment(meta.next, info.index)
meta.current.liteAssignment = vs.getValidatorAssignment(meta.current, validatorIndex)
assignment, nextDuty, err := vs.buildValidatorDuty(pubKey, info.index, s, req.Epoch, meta, currentAssignment, nextAssignment)
meta.next.liteAssignment = vs.getValidatorAssignment(meta.next, validatorIndex)
assignment, nextAssignment, err := vs.buildValidatorDuty(pubKey, validatorIndex, s, req.Epoch, meta)
if err != nil {
return nil, err
}
validatorAssignments = append(validatorAssignments, assignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextDuty)
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
}
// Dependent roots for fork choice
@@ -158,15 +147,18 @@ type dutiesMetadata struct {
}
type metadata struct {
committeesAtSlot uint64
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
committeeAssignments map[primitives.ValidatorIndex]*helpers.CommitteeAssignment
committeesAtSlot uint64
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
startSlot primitives.Slot
committeesBySlot [][][]primitives.ValidatorIndex
validatorAssignmentMap map[primitives.ValidatorIndex]*helpers.LiteAssignment
liteAssignment *helpers.LiteAssignment
}
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*dutiesMetadata, error) {
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*dutiesMetadata, error) {
meta := &dutiesMetadata{}
var err error
meta.current, err = loadMetadata(ctx, s, reqEpoch, requestIndices)
meta.current, err = loadMetadata(ctx, s, reqEpoch, numValidators)
if err != nil {
return nil, err
}
@@ -176,14 +168,14 @@ func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primi
return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
}
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, requestIndices)
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, numValidators)
if err != nil {
return nil, err
}
return meta, nil
}
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*metadata, error) {
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*metadata, error) {
meta := &metadata{}
if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {
@@ -196,36 +188,56 @@ func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.
}
meta.committeesAtSlot = helpers.SlotCommitteeCount(activeValidatorCount)
// Use CommitteeAssignments which only computes committees for requested validators
meta.committeeAssignments, err = helpers.CommitteeAssignments(ctx, s, reqEpoch, requestIndices)
meta.startSlot, err = slots.EpochStart(reqEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
return nil, err
}
meta.committeesBySlot, err = helpers.PrecomputeCommittees(ctx, s, meta.startSlot)
if err != nil {
return nil, err
}
if numValidators >= validatorLookupThreshold {
meta.validatorAssignmentMap = buildValidatorAssignmentMap(meta.committeesBySlot, meta.startSlot)
}
return meta, nil
}
// findValidatorIndexInCommittee finds the position of a validator in a committee.
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
for i, vIdx := range committee {
if vIdx == validatorIndex {
return uint64(i)
// buildValidatorAssignmentMap creates a map from validator index to assignment for O(1) lookup.
func buildValidatorAssignmentMap(
bySlot [][][]primitives.ValidatorIndex,
startSlot primitives.Slot,
) map[primitives.ValidatorIndex]*helpers.LiteAssignment {
validatorToAssignment := make(map[primitives.ValidatorIndex]*helpers.LiteAssignment)
for relativeSlot, committees := range bySlot {
for cIdx, committee := range committees {
for pos, vIdx := range committee {
validatorToAssignment[vIdx] = &helpers.LiteAssignment{
AttesterSlot: startSlot + primitives.Slot(relativeSlot),
CommitteeIndex: primitives.CommitteeIndex(cIdx),
CommitteeLength: uint64(len(committee)),
ValidatorCommitteeIndex: uint64(pos),
}
}
}
}
return 0
return validatorToAssignment
}
// getValidatorAssignment retrieves the assignment for a validator from CommitteeAssignments.
// getValidatorAssignment retrieves the assignment for a validator using either
// the pre-built assignment map (for large requests) or linear search (for small requests).
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
if assignment, exists := meta.committeeAssignments[validatorIndex]; exists {
return &helpers.LiteAssignment{
AttesterSlot: assignment.AttesterSlot,
CommitteeIndex: assignment.CommitteeIndex,
CommitteeLength: uint64(len(assignment.Committee)),
ValidatorCommitteeIndex: findValidatorIndexInCommittee(assignment.Committee, validatorIndex),
if meta.validatorAssignmentMap != nil {
if assignment, exists := meta.validatorAssignmentMap[validatorIndex]; exists {
return assignment
}
return &helpers.LiteAssignment{}
}
return &helpers.LiteAssignment{}
return helpers.AssignmentForValidator(meta.committeesBySlot, meta.startSlot, validatorIndex)
}
// buildValidatorDuty builds both current-epoch and next-epoch V2 duty objects
@@ -236,23 +248,21 @@ func (vs *Server) buildValidatorDuty(
s state.BeaconState,
reqEpoch primitives.Epoch,
meta *dutiesMetadata,
currentAssignment *helpers.LiteAssignment,
nextAssignment *helpers.LiteAssignment,
) (*ethpb.DutiesV2Response_Duty, *ethpb.DutiesV2Response_Duty, error) {
assignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
nextDuty := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
nextAssignment := &ethpb.DutiesV2Response_Duty{PublicKey: pubKey}
statusEnum := assignmentStatus(s, idx)
assignment.ValidatorIndex = idx
assignment.Status = statusEnum
assignment.CommitteesAtSlot = meta.current.committeesAtSlot
assignment.ProposerSlots = meta.current.proposalSlots[idx]
populateCommitteeFields(assignment, currentAssignment)
populateCommitteeFields(assignment, meta.current.liteAssignment)
nextDuty.ValidatorIndex = idx
nextDuty.Status = statusEnum
nextDuty.CommitteesAtSlot = meta.next.committeesAtSlot
populateCommitteeFields(nextDuty, nextAssignment)
nextAssignment.ValidatorIndex = idx
nextAssignment.Status = statusEnum
nextAssignment.CommitteesAtSlot = meta.next.committeesAtSlot
populateCommitteeFields(nextAssignment, meta.next.liteAssignment)
// Sync committee flags
if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
@@ -261,7 +271,7 @@ func (vs *Server) buildValidatorDuty(
return nil, nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
}
assignment.IsSyncCommittee = inSync
nextDuty.IsSyncCommittee = inSync
nextAssignment.IsSyncCommittee = inSync
if inSync {
if err := core.RegisterSyncSubnetCurrentPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
@@ -280,16 +290,18 @@ func (vs *Server) buildValidatorDuty(
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
}
nextDuty.IsSyncCommittee = nextInSync
nextAssignment.IsSyncCommittee = nextInSync
if nextInSync {
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
go func() {
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
log.WithError(err).Warn("Could not register sync subnet next period")
}
}()
}
}
}
return assignment, nextDuty, nil
return assignment, nextAssignment, nil
}
func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {
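The validatorLookupThreshold constant introduced at the top of this file gates whether loadMetadata materialises a full validator-to-assignment map or leaves lookups to a linear search. A self-contained sketch of that decision and the map build, with plain uint64 indices standing in for the primitives and helpers types:

package main

import "fmt"

// assignment is a stand-in for helpers.LiteAssignment in this sketch.
type assignment struct {
	slot, committeeIndex, position uint64
}

const lookupThreshold = 3000 // mirrors validatorLookupThreshold above

// maybeBuildAssignmentMap pays the cost of materialising a
// validator-to-assignment map only when the request is large enough that
// repeated linear searches over the precomputed committees would dominate.
func maybeBuildAssignmentMap(committeesBySlot [][][]uint64, startSlot uint64, numRequested int) map[uint64]assignment {
	if numRequested < lookupThreshold {
		return nil // small request: per-validator linear search is cheaper
	}
	m := make(map[uint64]assignment)
	for relSlot, committees := range committeesBySlot {
		for cIdx, committee := range committees {
			for pos, v := range committee {
				m[v] = assignment{
					slot:           startSlot + uint64(relSlot),
					committeeIndex: uint64(cIdx),
					position:       uint64(pos),
				}
			}
		}
	}
	return m
}

func main() {
	bySlot := [][][]uint64{{{1, 2, 3}}, {{4, 5}, {6, 7}}}
	fmt.Println(maybeBuildAssignmentMap(bySlot, 200, 5) == nil)  // true: below threshold
	fmt.Println(len(maybeBuildAssignmentMap(bySlot, 200, 4000))) // 7 validators mapped
}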

View File

@@ -560,20 +560,105 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
assert.ErrorContains(t, "Syncing to latest head", err)
}
func TestGetValidatorAssignment(t *testing.T) {
start := primitives.Slot(100)
// Test using CommitteeAssignments
committeeAssignments := map[primitives.ValidatorIndex]*helpers.CommitteeAssignment{
5: {
Committee: []primitives.ValidatorIndex{4, 5, 6},
AttesterSlot: start + 1,
CommitteeIndex: primitives.CommitteeIndex(0),
},
func TestBuildValidatorAssignmentMap(t *testing.T) {
start := primitives.Slot(200)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}}, // slot 200, committee 0
{{7, 8, 9}}, // slot 201, committee 0
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
}
assignmentMap := buildValidatorAssignmentMap(bySlot, start)
// Test validator 8 assignment (slot 201, committee 0, position 1)
vIdx := primitives.ValidatorIndex(8)
got, exists := assignmentMap[vIdx]
assert.Equal(t, true, exists)
require.NotNil(t, got)
assert.Equal(t, start+1, got.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got.CommitteeIndex)
assert.Equal(t, uint64(3), got.CommitteeLength)
assert.Equal(t, uint64(1), got.ValidatorCommitteeIndex)
// Test validator 1 assignment (slot 200, committee 0, position 0)
vIdx1 := primitives.ValidatorIndex(1)
got1, exists1 := assignmentMap[vIdx1]
assert.Equal(t, true, exists1)
require.NotNil(t, got1)
assert.Equal(t, start, got1.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), got1.CommitteeIndex)
assert.Equal(t, uint64(3), got1.CommitteeLength)
assert.Equal(t, uint64(0), got1.ValidatorCommitteeIndex)
// Test validator 10 assignment (slot 202, committee 1, position 0)
vIdx10 := primitives.ValidatorIndex(10)
got10, exists10 := assignmentMap[vIdx10]
assert.Equal(t, true, exists10)
require.NotNil(t, got10)
assert.Equal(t, start+2, got10.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(1), got10.CommitteeIndex)
assert.Equal(t, uint64(2), got10.CommitteeLength)
assert.Equal(t, uint64(0), got10.ValidatorCommitteeIndex)
// Test non-existent validator
_, exists99 := assignmentMap[primitives.ValidatorIndex(99)]
assert.Equal(t, false, exists99)
// Verify that we get the same results as the linear search
for _, committees := range bySlot {
for _, committee := range committees {
for _, validatorIdx := range committee {
linearResult := helpers.AssignmentForValidator(bySlot, start, validatorIdx)
mapResult, mapExists := assignmentMap[validatorIdx]
assert.Equal(t, true, mapExists)
require.DeepEqual(t, linearResult, mapResult)
}
}
}
}
func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
}
// Test with pre-built assignment map (large request scenario)
meta := &metadata{
committeeAssignments: committeeAssignments,
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: buildValidatorAssignmentMap(bySlot, start),
}
vs := &Server{}
// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
require.NotNil(t, assignment)
assert.Equal(t, start, assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)
// Test non-existent validator should return empty assignment
assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
require.NotNil(t, assignment)
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}
func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
start := primitives.Slot(100)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}},
{{4, 5, 6}},
}
// Test without assignment map (small request scenario)
meta := &metadata{
startSlot: start,
committeesBySlot: bySlot,
validatorAssignmentMap: nil, // No map - should use linear search
}
vs := &Server{}
@@ -591,3 +676,53 @@ func TestGetValidatorAssignment(t *testing.T) {
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
}
func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
state, _ := util.DeterministicGenesisState(t, 128)
epoch := primitives.Epoch(0)
tests := []struct {
name string
numValidators int
expectAssignmentMap bool
}{
{
name: "Small request - below threshold",
numValidators: 100,
expectAssignmentMap: false,
},
{
name: "Large request - at threshold",
numValidators: validatorLookupThreshold,
expectAssignmentMap: true,
},
{
name: "Large request - above threshold",
numValidators: validatorLookupThreshold + 1000,
expectAssignmentMap: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
meta, err := loadMetadata(t.Context(), state, epoch, tt.numValidators)
require.NoError(t, err)
require.NotNil(t, meta)
if tt.expectAssignmentMap {
require.NotNil(t, meta.validatorAssignmentMap, "Expected assignment map to be built for large requests")
assert.Equal(t, true, len(meta.validatorAssignmentMap) > 0, "Assignment map should not be empty")
} else {
// For small requests, the map should be nil (not initialized)
if meta.validatorAssignmentMap != nil {
t.Errorf("Expected no assignment map for small requests, got: %v", meta.validatorAssignmentMap)
}
}
// Common fields should always be set
assert.Equal(t, true, meta.committeesAtSlot > 0)
require.NotNil(t, meta.committeesBySlot)
assert.Equal(t, true, len(meta.committeesBySlot) > 0)
})
}
}
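
The tests above exercise a simple precompute-then-lookup idea: when a duties request covers many validators, walk the committees-by-slot structure once and build a validator-indexed map instead of doing a linear scan per validator. A minimal standalone sketch of that construction (simplified types and illustrative names, not Prysm's actual helpers):

package main

import "fmt"

// assignment mirrors the fields checked in the tests above; the type and field
// names are illustrative, not Prysm's.
type assignment struct {
	AttesterSlot            uint64
	CommitteeIndex          uint64
	CommitteeLength         uint64
	ValidatorCommitteeIndex uint64
}

// buildAssignmentMap walks committees grouped by slot once and records every
// validator's slot, committee, committee length, and position, trading memory
// for O(1) lookups on large requests.
func buildAssignmentMap(bySlot [][][]uint64, startSlot uint64) map[uint64]*assignment {
	out := make(map[uint64]*assignment)
	for slotOffset, committees := range bySlot {
		for committeeIdx, committee := range committees {
			for pos, validator := range committee {
				out[validator] = &assignment{
					AttesterSlot:            startSlot + uint64(slotOffset),
					CommitteeIndex:          uint64(committeeIdx),
					CommitteeLength:         uint64(len(committee)),
					ValidatorCommitteeIndex: uint64(pos),
				}
			}
		}
	}
	return out
}

func main() {
	bySlot := [][][]uint64{{{1, 2, 3}}, {{7, 8, 9}}, {{4, 5}, {10, 11}}}
	m := buildAssignmentMap(bySlot, 200)
	fmt.Printf("%+v\n", *m[8]) // slot 201, committee 0, position 1
}

Building the map costs one pass over all committees, which is why a threshold such as validatorLookupThreshold (tested above) keeps small requests on the cheaper linear search.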

View File

@@ -352,7 +352,7 @@ func (vs *Server) broadcastAndReceiveSidecars(
dataColumnSidecars []blocks.RODataColumn,
) error {
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, root); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
@@ -495,22 +495,43 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
}
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, sidecar := range roSidecars {
verifiedSidecar := blocks.NewVerifiedRODataColumn(sidecar)
verifiedSidecars = append(verifiedSidecars, verifiedSidecar)
func (vs *Server) broadcastAndReceiveDataColumns(
ctx context.Context,
roSidecars []blocks.RODataColumn,
root [fieldparams.RootLength]byte,
) error {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
eg, _ := errgroup.WithContext(ctx)
for _, roSidecar := range roSidecars {
// We built this block ourselves, so we can upgrade the read-only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(roSidecar.Index)
if err := vs.P2P.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "broadcast data column")
}
return nil
})
}
// Broadcast sidecars (non blocking).
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
return errors.Wrap(err, "broadcast data column sidecars")
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
}
// In parallel, receive sidecars.
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedSidecars); err != nil {
return errors.Wrap(err, "receive data columns")
for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}
return nil
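
The hunk above fans each sidecar broadcast out to its own goroutine via errgroup and waits at the end, so the receive and event-feed work can proceed while broadcasts are in flight. A self-contained sketch of that pattern (the broadcast callback is a stand-in, not Prysm's P2P API):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// broadcastAll launches one goroutine per column index and surfaces the first
// broadcast error (if any) from eg.Wait.
func broadcastAll(ctx context.Context, indices []uint64, broadcast func(context.Context, uint64) error) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, idx := range indices {
		idx := idx // capture per iteration (required before Go 1.22)
		eg.Go(func() error {
			return broadcast(ctx, idx)
		})
	}
	// Other work (receiving, notifying subscribers) could run here before waiting.
	return eg.Wait()
}

func main() {
	err := broadcastAll(context.Background(), []uint64{0, 1, 2}, func(_ context.Context, idx uint64) error {
		fmt.Println("broadcasting column", idx)
		return nil
	})
	fmt.Println("err:", err)
}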

View File

@@ -12,18 +12,13 @@ import (
func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*ethpb.ProposerSlashing, []ethpb.AttSlashing) {
var err error
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
if len(proposerSlashings) == 0 && len(attSlashings) == 0 {
return validProposerSlashings, validAttSlashings
}
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo := v.ExitInformation(head)
if err := helpers.UpdateTotalActiveBalanceCache(head, exitInfo.TotalActiveBalance); err != nil {
log.WithError(err).Warn("Could not update total active balance cache")
}
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
for _, slashing := range proposerSlashings {
_, err = blocks.ProcessProposerSlashing(ctx, head, slashing, exitInfo)
if err != nil {
@@ -32,6 +27,8 @@ func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*
}
validProposerSlashings = append(validProposerSlashings, slashing)
}
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
for _, slashing := range attSlashings {
_, err = blocks.ProcessAttesterSlashing(ctx, head, slashing, exitInfo)
if err != nil {

View File

@@ -45,6 +45,7 @@ go_library(
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_handlers.go",
"subscriber_light_client.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
"subscription_topic_handler.go",
@@ -112,6 +113,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",
@@ -194,6 +196,7 @@ go_test(
"subscriber_beacon_aggregate_proof_test.go",
"subscriber_beacon_blocks_test.go",
"subscriber_data_column_sidecar_test.go",
"subscriber_race_test.go",
"subscriber_test.go",
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",

View File

@@ -4,8 +4,9 @@ import (
"bytes"
"context"
"encoding/hex"
"fmt"
"sync"
"github.com/OffchainLabs/prysm/v6/async"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
@@ -23,51 +24,70 @@ import (
"github.com/sirupsen/logrus"
)
var pendingAttsLimit = 32768
// This defines how often a node cleans up and processes pending attestations in the queue.
var processPendingAttsPeriod = slots.DivideSlotBy(2 /* twice per slot */)
var pendingAttsLimit = 10000
// This method processes pending attestations once a "known" block has arrived. After validation,
// the valid attestations get saved into the operation mem pool, and the invalid attestations get deleted
// from the sync pending pool.
func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "processPendingAttsForBlock")
// This processes the pending attestation queues every processPendingAttsPeriod.
func (s *Service) runPendingAttsQueue() {
// Prevents multiple queue processing goroutines (invoked by RunEvery) from contending for data.
mutex := new(sync.Mutex)
async.RunEvery(s.ctx, processPendingAttsPeriod, func() {
mutex.Lock()
if err := s.processPendingAtts(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending attestation")
}
mutex.Unlock()
})
}
// This defines how pending attestations are processed. It does the following:
// 1. Clean up invalid pending attestations from the queue.
// 2. Check if pending attestations can be processed when the block has arrived.
// 3. Request block from a random peer if unable to proceed step 2.
func (s *Service) processPendingAtts(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "processPendingAtts")
defer span.End()
// Confirm that the pending attestation's missing block arrived and the node processed the block.
if !s.cfg.beaconDB.HasBlock(ctx, bRoot) || !(s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) || !s.cfg.chain.InForkchoice(bRoot) {
return fmt.Errorf("could not process unknown block root %#x", bRoot)
}
// Before a node processes pending attestations queue, it verifies
// the attestations in the queue are still valid. Attestations will
// be deleted from the queue if invalid (i.e. getting stalled from falling too many slots behind).
s.validatePendingAtts(ctx, s.cfg.clock.CurrentSlot())
s.pendingAttsLock.RLock()
attestations := s.blkRootToPendingAtts[bRoot]
roots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
for br := range s.blkRootToPendingAtts {
roots = append(roots, br)
}
s.pendingAttsLock.RUnlock()
if len(attestations) > 0 {
s.processAttestations(ctx, attestations)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
}).Debug("Verified and saved pending attestations to pool")
}
var pendingRoots [][32]byte
randGen := rand.NewGenerator()
// Delete the missing block root key from the pending attestation queue so a node will not request the block again.
s.pendingAttsLock.Lock()
delete(s.blkRootToPendingAtts, bRoot)
pendingRoots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
s.pendingQueueLock.RLock()
for r := range s.blkRootToPendingAtts {
if !s.seenPendingBlocks[r] {
pendingRoots = append(pendingRoots, r)
for _, bRoot := range roots {
s.pendingAttsLock.RLock()
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()
// Has the pending attestation's missing block arrived, and has the node processed it yet?
if s.cfg.beaconDB.HasBlock(ctx, bRoot) && (s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) && s.cfg.chain.InForkchoice(bRoot) {
s.processAttestations(ctx, attestations)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
}).Debug("Verified and saved pending attestations to pool")
// Delete the missing block root key from the pending attestation queue so a node will not request the block again.
s.pendingAttsLock.Lock()
delete(s.blkRootToPendingAtts, bRoot)
s.pendingAttsLock.Unlock()
} else {
s.pendingQueueLock.RLock()
seen := s.seenPendingBlocks[bRoot]
s.pendingQueueLock.RUnlock()
if !seen {
pendingRoots = append(pendingRoots, bRoot)
}
}
}
s.pendingQueueLock.RUnlock()
s.pendingAttsLock.Unlock()
// Request the blocks for the pending attestations that could not be processed.
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}
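
runPendingAttsQueue above switches from per-block processing to a fixed-interval sweep guarded by a mutex, so overlapping ticks never contend. A minimal sketch of that shape, using a plain ticker as a stand-in for Prysm's async.RunEvery (names and queue contents are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// runEvery calls fn on every tick until ctx is cancelled; it stands in for
// async.RunEvery in the hunk above.
func runEvery(ctx context.Context, period time.Duration, fn func()) {
	go func() {
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fn()
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 450*time.Millisecond)
	defer cancel()

	var mu sync.Mutex // serializes sweeps, mirroring the mutex in runPendingAttsQueue
	queue := []string{"att for root A", "att for root B"}

	runEvery(ctx, 100*time.Millisecond, func() {
		mu.Lock()
		defer mu.Unlock()
		if len(queue) == 0 {
			return
		}
		fmt.Println("processing", queue[0])
		queue = queue[1:]
	})

	<-ctx.Done()
}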

View File

@@ -53,47 +53,16 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
p1.Peers().SetChainState(p2.PeerID(), &ethpb.StatusV2{})
// Create and save block 'A' to DB
blockA := util.NewBeaconBlock()
util.SaveBlock(t, t.Context(), db, blockA)
rootA, err := blockA.Block.HashTreeRoot()
require.NoError(t, err)
// Save state for block 'A'
stateA, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, db.SaveState(t.Context(), stateA, rootA))
// Setup chain service with block 'A' in forkchoice
chain := &mock.ChainService{
Genesis: prysmTime.Now(),
FinalizedCheckPoint: &ethpb.Checkpoint{},
// NotFinalized: false means InForkchoice returns true
}
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: &ethpb.Checkpoint{}}
r := &Service{
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
blkRootToPendingAtts: make(map[[32]byte][]any),
seenPendingBlocks: make(map[[32]byte]bool),
chainStarted: abool.New(),
}
// Add pending attestations for OTHER block roots (not block A)
// These are blocks we don't have yet, so they should be requested
attB := &ethpb.Attestation{Data: &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte{'B'}, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
}}
attC := &ethpb.Attestation{Data: &ethpb.AttestationData{
BeaconBlockRoot: bytesutil.PadTo([]byte{'C'}, 32),
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
}}
r.blkRootToPendingAtts[[32]byte{'B'}] = []any{attB}
r.blkRootToPendingAtts[[32]byte{'C'}] = []any{attC}
// Process block A (which exists and has no pending attestations)
// This should skip processing attestations for A and request blocks B and C
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
a := &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}
r.blkRootToPendingAtts[[32]byte{'A'}] = []any{a}
require.NoError(t, r.processPendingAtts(t.Context()))
require.LogsContain(t, hook, "Requesting block by root")
}
@@ -172,7 +141,7 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{att}
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
require.NoError(t, r.processPendingAtts(t.Context()))
var wg sync.WaitGroup
wg.Add(1)
@@ -266,7 +235,7 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAttElectra(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{att}
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
require.NoError(t, r.processPendingAtts(t.Context()))
var wg sync.WaitGroup
wg.Add(1)
go func() {
@@ -391,7 +360,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
r.blkRootToPendingAtts[root] = []any{
att,
}
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
require.NoError(t, r.processPendingAtts(t.Context()))
// Verify that the event feed receives the expected attestation.
var wg sync.WaitGroup
@@ -502,7 +471,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
}
s.blkRootToPendingAtts[r32] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
require.NoError(t, s.processPendingAttsForBlock(t.Context(), r32))
require.NoError(t, s.processPendingAtts(t.Context()))
assert.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcasted bad aggregate")
@@ -541,7 +510,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
require.NoError(t, err)
s.blkRootToPendingAtts[r32] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, s.processPendingAttsForBlock(t.Context(), r32))
require.NoError(t, s.processPendingAtts(t.Context()))
assert.Equal(t, true, p2p.BroadcastCalled.Load(), "The good aggregate was not broadcasted")
@@ -632,7 +601,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
require.NoError(t, r.processPendingAtts(t.Context()))
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
@@ -727,7 +696,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAttElectra(t *testing.T) {
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProofElectra{Message: aggregateAndProof, Signature: aggreSig}}
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
require.NoError(t, r.processPendingAtts(t.Context()))
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
@@ -811,8 +780,8 @@ func TestProcessPendingAtts_BlockNotInForkChoice(t *testing.T) {
// Add pending attestation
r.blkRootToPendingAtts[root] = []any{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}}
// Process pending attestations - should return error because block is not in fork choice
require.ErrorContains(t, "could not process unknown block root", r.processPendingAttsForBlock(t.Context(), root))
// Process pending attestations - should not process because block is not in fork choice
require.NoError(t, r.processPendingAtts(t.Context()))
// Verify attestations were not processed (should still be pending)
assert.Equal(t, 1, len(r.blkRootToPendingAtts[root]), "Attestations should still be pending")

View File

@@ -97,7 +97,7 @@ func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock)
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
return errors.Wrap(err, "fetch custody group count from peer")
}
samplingSize := max(custodyGroupCount, samplesPerSlot)

View File

@@ -274,6 +274,7 @@ func (s *Service) Start() {
s.cfg.p2p.AddPingMethod(s.sendPingRequest)
s.processPendingBlocksQueue()
s.runPendingAttsQueue()
s.maintainPeerStatuses()
if params.FuluEnabled() {
@@ -415,6 +416,11 @@ func (s *Service) startDiscoveryAndSubscriptions() {
// Register respective pubsub handlers at state synced event.
s.registerSubscribers(currentEpoch, forkDigest)
// Initialize registeredNetworkEntry to the current network schedule entry to avoid
// duplicate subscriber registration on the first forkWatcher tick when the next
// epoch has the same digest.
s.registeredNetworkEntry = params.GetNetworkScheduleEntry(currentEpoch)
// Start the fork watcher.
go s.forkWatcher()
}

View File

@@ -44,11 +44,6 @@ type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.Validati
// subHandler represents handler for a given subscription.
type subHandler func(context.Context, proto.Message) error
// noopHandler is used for subscriptions that do not require anything to be done.
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
return nil
}
// subscribeParameters holds the parameters that are needed to construct a set of subscriptions topics for a given
// set of gossipsub subnets.
type subscribeParameters struct {
@@ -256,7 +251,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.subscribe(
p2p.LightClientOptimisticUpdateTopicFormat,
s.validateLightClientOptimisticUpdate,
noopHandler,
s.lightClientOptimisticUpdateSubscriber,
digest,
)
})
@@ -264,7 +259,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.subscribe(
p2p.LightClientFinalityUpdateTopicFormat,
s.validateLightClientFinalityUpdate,
noopHandler,
s.lightClientFinalityUpdateSubscriber,
digest,
)
})
@@ -349,15 +344,23 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
log := log.WithField("topic", topic)
// Do not resubscribe already seen subscriptions.
ok := s.subHandler.topicExists(topic)
if ok {
// 1) Fast-path bail if it already exists.
if s.subHandler.topicExists(topic) {
log.WithField("topic", topic).Debug("Provided topic already has an active subscription running")
return nil
}
// 2) Otherwise, atomically reserve to block concurrent goroutines.
if !s.subHandler.tryReserveTopic(topic) {
// Someone else reserved first.
log.WithField("topic", topic).Debug("Provided topic already has an active subscription running")
return nil
}
if err := s.cfg.p2p.PubSub().RegisterTopicValidator(s.wrapAndReportValidation(topic, validator)); err != nil {
log.WithError(err).Error("Could not register validator for topic")
// Clean up the reservation since we're not proceeding
s.subHandler.removeTopic(topic)
return nil
}
@@ -367,9 +370,12 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
// libp2p PubSub library or a subscription request to a topic that fails to match the topic
// subscription filter.
log.WithError(err).Error("Could not subscribe topic")
// Clean up the reservation since we're not proceeding
s.subHandler.removeTopic(topic)
return nil
}
// Update the reservation with the actual subscription
s.subHandler.addTopic(sub.Topic(), sub)
// Pipeline decodes the incoming subscription data, runs the validation, and handles the
@@ -419,6 +425,8 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
// Cancel subscription in the event of an error, as we are
// now exiting topic event loop.
sub.Cancel()
// Remove topic from our tracking to allow resubscription.
s.subHandler.removeTopic(topic)
return
}
@@ -538,7 +546,15 @@ func (s *Service) subscribeToSubnets(t *subnetTracker) error {
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
sub := s.subscribeWithBase(topic, t.validate, t.handle)
// Even if sub is nil (topic already exists), we need to track the subnet
// to avoid repeated subscription attempts every slot.
if sub == nil {
// Topic already exists, get the existing subscription for tracking
fullTopic := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
sub = s.subHandler.subForTopic(fullTopic)
}
t.track(subnet, sub)
}
return nil

View File

@@ -68,10 +68,7 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}
return err
}
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
return errors.Wrap(err, "process pending atts for block")
}
return nil
return err
}
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
@@ -270,9 +267,16 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
unseenIndices[sidecar.Index] = true
}
// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
return nil, errors.Wrap(err, "broadcast data column sidecars")
// Broadcast all the data column sidecars we reconstructed but did not see via gossip.
for _, sidecar := range unseenSidecars {
// Compute the subnet for this data column sidecar.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
// Broadcast the data column sidecar.
if err := s.cfg.p2p.BroadcastDataColumnSidecar(subnet, sidecar); err != nil {
// Don't return on error on broadcast failure, just log it.
log.WithError(err).Error("Broadcast data column")
}
}
// Receive data column sidecars.
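
The per-sidecar broadcast above relies on peerdas.ComputeSubnetForDataColumnSidecar, which in the consensus specs is the column index modulo the data column subnet count. A hedged sketch of that mapping (the constant value 128 is the mainnet figure at the time of writing and is an assumption here):

package main

import "fmt"

// dataColumnSidecarSubnetCount mirrors the spec constant
// DATA_COLUMN_SIDECAR_SUBNET_COUNT; 128 is assumed for illustration.
const dataColumnSidecarSubnetCount = 128

// computeSubnetForDataColumnSidecar maps a column index to its gossip subnet
// by taking the index modulo the subnet count.
func computeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}

func main() {
	for _, idx := range []uint64{0, 127, 128, 300} {
		fmt.Printf("column %d -> subnet %d\n", idx, computeSubnetForDataColumnSidecar(idx))
	}
}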

View File

@@ -0,0 +1,66 @@
package sync
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclientTypes "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)
func (s *Service) lightClientOptimisticUpdateSubscriber(_ context.Context, msg proto.Message) error {
update, err := lightclientTypes.NewWrappedOptimisticUpdate(msg)
if err != nil {
return err
}
attestedHeaderRoot, err := update.AttestedHeader().Beacon().HashTreeRoot()
if err != nil {
return err
}
log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", update.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", update.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Saving newly received light client optimistic update.")
s.lcStore.SetLastOptimisticUpdate(update, false)
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: update,
})
return nil
}
func (s *Service) lightClientFinalityUpdateSubscriber(_ context.Context, msg proto.Message) error {
update, err := lightclientTypes.NewWrappedFinalityUpdate(msg)
if err != nil {
return err
}
attestedHeaderRoot, err := update.AttestedHeader().Beacon().HashTreeRoot()
if err != nil {
return err
}
log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", update.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", update.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Saving newly received light client finality update.")
s.lcStore.SetLastFinalityUpdate(update, false)
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: update,
})
return nil
}

View File

@@ -0,0 +1,347 @@
package sync
import (
"context"
"runtime"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/async/abool"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v6/testing/require"
"google.golang.org/protobuf/proto"
)
// TestSubscriptionCleanup_MissingRemoveTopic tests the following bug:
// When a subscription's message loop fails and sub.Cancel() is called,
// removeTopic() is NOT called, leaving stale entries in subTopics map.
// This likely causes memory leaks and prevents resubscription (missed attestations).
func TestSubscriptionCleanup_MissingRemoveTopic(t *testing.T) {
t.Run("memory leak with repeated failures", func(t *testing.T) {
// This test verifies that removeTopic() is called when subscription fails
// Fresh setup for this subtest
p2pService := p2ptest.NewTestP2P(t)
gt := time.Now()
vr := [32]byte{'A'}
r := &Service{
ctx: context.Background(),
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: &mockChain.ChainService{
ValidatorsRoot: vr,
Genesis: gt,
},
clock: startup.NewClock(gt, vr),
},
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
}
markInitSyncComplete(t, r)
digest, err := r.currentForkDigest()
require.NoError(t, err)
p2pService.Digest = digest
getMapSize := func() int {
r.subHandler.RLock()
defer r.subHandler.RUnlock()
return len(r.subHandler.subTopics)
}
baseTopic := "/eth2/%x/voluntary_exit"
// Do one cycle: subscribe, cancel, check cleanup
iterCtx, iterCancel := context.WithCancel(context.Background())
r.ctx = iterCtx
handler := func(ctx context.Context, msg proto.Message) error {
return nil
}
r.markForChainStart()
// Subscribe
sub := r.subscribeWithBase(baseTopic, r.noopValidator, handler)
require.NotNil(t, sub, "First subscription should succeed")
// Verify subscribed
sizeAfterSubscribe := getMapSize()
require.Equal(t, 1, sizeAfterSubscribe, "Should have 1 entry after subscribe")
// Cancel to simulate failure
iterCancel()
time.Sleep(300 * time.Millisecond)
// Check cleanup happened - this is the core fix verification
sizeAfterCancel := getMapSize()
if sizeAfterCancel != 0 {
t.Errorf("After context cancellation, subTopics has %d entries (expected 0). "+
"removeTopic() should have been called at line 420.",
sizeAfterCancel)
} else {
t.Logf("SUCCESS: Cleanup working correctly - map size is 0 after cancellation")
}
})
}
// TestConcurrentSubscription_RaceCondition tests the following bug:
// Multiple goroutines can pass topicExists() check simultaneously
// before any calls addTopic(), causing duplicate subscriptions.
func TestConcurrentSubscription_RaceCondition(t *testing.T) {
tests := []struct {
name string
numGoroutines int
iterations int
useBarrier bool
}{
{
name: "two concurrent",
numGoroutines: 2,
iterations: 20,
useBarrier: true,
},
{
name: "five concurrent",
numGoroutines: 5,
iterations: 15,
useBarrier: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
duplicateDetected := 0
for iter := 0; iter < tt.iterations; iter++ {
// Fresh setup for each iteration
p2pService := p2ptest.NewTestP2P(t)
gt := time.Now()
vr := [32]byte{'A'}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
r := &Service{
ctx: ctx,
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: &mockChain.ChainService{
ValidatorsRoot: vr,
Genesis: gt,
},
clock: startup.NewClock(gt, vr),
},
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
}
markInitSyncComplete(t, r)
digest, err := r.currentForkDigest()
require.NoError(t, err)
p2pService.Digest = digest
baseTopic := "/eth2/%x/voluntary_exit"
r.markForChainStart()
// Track successful subscriptions
successfulSubs := atomic.Int32{}
checksPassed := atomic.Int32{}
// Barrier to synchronize goroutine starts
var barrier sync.WaitGroup
if tt.useBarrier {
barrier.Add(tt.numGoroutines)
}
startSignal := make(chan struct{})
var wg sync.WaitGroup
// Launch concurrent subscription attempts
for i := 0; i < tt.numGoroutines; i++ {
wg.Add(1)
go func() {
defer wg.Done()
if tt.useBarrier {
barrier.Done()
barrier.Wait()
}
<-startSignal
// Attempt subscription
// ideally only one goroutine should get a non-nil subscription
handler := func(ctx context.Context, msg proto.Message) error {
return nil
}
sub := r.subscribeWithBase(baseTopic, r.noopValidator, handler)
if sub != nil {
successfulSubs.Add(1)
}
// Count how many goroutines attempted (for stats)
checksPassed.Add(1)
}()
}
// Wait for all goroutines to be ready
if tt.useBarrier {
barrier.Wait()
}
time.Sleep(10 * time.Millisecond)
// Start all goroutines simultaneously
close(startSignal)
// Wait for completion
wg.Wait()
time.Sleep(200 * time.Millisecond)
// Check results
subs := successfulSubs.Load()
attempts := checksPassed.Load()
r.subHandler.RLock()
finalMapSize := len(r.subHandler.subTopics)
r.subHandler.RUnlock()
// ideally only ONE goroutine should successfully subscribe
// If more than one succeeds, a race condition exists
if subs > 1 {
duplicateDetected++
t.Logf("Iteration %d: RACE DETECTED - %d goroutines attempted, "+
"%d successful subscriptions (expected 1), final map size: %d",
iter, attempts, subs, finalMapSize)
}
// The map should have exactly 0 or 1 entry
if finalMapSize > 1 {
t.Errorf("Iteration %d: INCONSISTENT STATE - map has %d entries (expected 0-1). "+
"This indicates multiple goroutines subscribed concurrently.",
iter, finalMapSize)
}
// Cleanup
cancel()
r.subHandler.Lock()
for topic := range r.subHandler.subTopics {
sub := r.subHandler.subTopics[topic]
if sub != nil {
sub.Cancel()
}
delete(r.subHandler.subTopics, topic)
}
r.subHandler.Unlock()
}
if duplicateDetected > 0 {
racePercentage := float64(duplicateDetected) / float64(tt.iterations) * 100
t.Errorf("RACE CONDITION EXISTS in %d/%d iterations (%.1f%%). "+
"Multiple goroutines successfully subscribed (only 1 expected). ",
duplicateDetected, tt.iterations, racePercentage)
} else {
t.Logf("SUCCESS: No Race condition! Only 1 subscription succeeded in all %d iterations", tt.iterations)
}
})
}
}
// TestMemoryGrowth_SubscriptionFailures demonstrates memory growth over time
func TestMemoryGrowth_SubscriptionFailures(t *testing.T) {
p2pService := p2ptest.NewTestP2P(t)
gt := time.Now()
vr := [32]byte{'A'}
r := &Service{
ctx: context.Background(),
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: &mockChain.ChainService{
ValidatorsRoot: vr,
Genesis: gt,
},
clock: startup.NewClock(gt, vr),
},
subHandler: newSubTopicHandler(),
chainStarted: abool.New(),
}
markInitSyncComplete(t, r)
digest, err := r.currentForkDigest()
require.NoError(t, err)
p2pService.Digest = digest
baseTopic := "/eth2/%x/voluntary_exit"
getMapSize := func() int {
r.subHandler.RLock()
defer r.subHandler.RUnlock()
return len(r.subHandler.subTopics)
}
failures := 50
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
startAlloc := memStats.Alloc
for i := 0; i < failures; i++ {
ctx, cancel := context.WithCancel(context.Background())
r.ctx = ctx
r.markForChainStart()
handler := func(ctx context.Context, msg proto.Message) error {
return nil
}
sub := r.subscribeWithBase(baseTopic, r.noopValidator, handler)
if sub != nil {
// Cancel immediately to simulate failure
cancel()
time.Sleep(100 * time.Millisecond)
}
if i%10 == 0 {
runtime.ReadMemStats(&memStats)
currentAlloc := memStats.Alloc
growth := currentAlloc - startAlloc
t.Logf("After %d failures: subTopics size=%d, heap growth=%d KB",
i, getMapSize(), growth/1024)
}
}
finalSize := getMapSize()
runtime.ReadMemStats(&memStats)
finalAlloc := memStats.Alloc
t.Logf("Final results: %d subscription failures", failures)
t.Logf(" subTopics map size: %d entries", finalSize)
t.Logf(" Start heap: %d KB, Final heap: %d KB", startAlloc/1024, finalAlloc/1024)
// With the bug, even one stale entry is a problem because it prevents resubscription
if finalSize > 0 {
t.Errorf("MEMORY LEAK / STALE ENTRY: After %d failures, %d stale entries remain in subTopics map (expected 0). "+
"Even 1 stale entry prevents resubscription, causing missed attestations in production.",
failures, finalSize)
}
// Check if heap grew significantly (handle wraparound by checking if finalAlloc >= startAlloc)
if finalAlloc >= startAlloc {
totalGrowth := finalAlloc - startAlloc
if totalGrowth > 50*1024 { // 50 KB threshold
t.Logf("NOTE: Heap grew by %d KB over %d failures. ",
totalGrowth/1024, failures)
}
} else {
t.Logf("NOTE: Heap decreased (GC ran), cannot measure growth accurately")
}
}

View File

@@ -9,10 +9,12 @@ import (
"time"
"github.com/OffchainLabs/prysm/v6/async/abool"
"github.com/OffchainLabs/prysm/v6/async/event"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
db "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
@@ -26,6 +28,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -673,3 +676,143 @@ func createPeer(t *testing.T, topics ...string) *p2ptest.TestP2P {
}
return p
}
func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) {
origNC := params.BeaconConfig()
// restore network config after test completes
defer func() {
params.OverrideBeaconConfig(origNC)
}()
params.SetupTestConfigCleanup(t)
p2pService := p2ptest.NewTestP2P(t)
ctx := t.Context()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 1
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
params.OverrideBeaconConfig(cfg)
secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
genesisDrift := slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals
chainService := &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0),
}
d := db.SetupDB(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)
r := Service{
ctx: ctx,
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
beaconDB: d,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lcStore,
subHandler: newSubTopicHandler(),
}
markInitSyncComplete(t, &r)
topic := p2p.LightClientOptimisticUpdateTopicFormat
var wg sync.WaitGroup
wg.Add(1)
var err error
p2pService.Digest, err = r.currentForkDigest()
require.NoError(t, err)
r.subscribe(topic, r.validateLightClientOptimisticUpdate, func(ctx context.Context, msg proto.Message) error {
require.NoError(t, r.lightClientOptimisticUpdateSubscriber(ctx, msg))
wg.Done()
return nil
}, p2pService.Digest)
r.markForChainStart()
l := util.NewTestLightClient(t, version.Altair, util.WithSupermajority(0))
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err, "Error generating light client optimistic update")
p2pService.ReceivePubSub(topic, update.Proto())
if util.WaitTimeout(&wg, time.Second) {
t.Fatal("Did not receive PubSub in 1 second")
}
u := r.lcStore.LastOptimisticUpdate()
assert.DeepEqual(t, update.Proto(), u.Proto())
}
func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) {
origNC := params.BeaconConfig()
// restore network config after test completes
defer func() {
params.OverrideBeaconConfig(origNC)
}()
params.SetupTestConfigCleanup(t)
p2pService := p2ptest.NewTestP2P(t)
ctx := t.Context()
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 1
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
params.OverrideBeaconConfig(cfg)
secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
genesisDrift := slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals
chainService := &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0),
}
d := db.SetupDB(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)
r := Service{
ctx: ctx,
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
beaconDB: d,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lcStore,
subHandler: newSubTopicHandler(),
}
markInitSyncComplete(t, &r)
topic := p2p.LightClientFinalityUpdateTopicFormat
var wg sync.WaitGroup
wg.Add(1)
var err error
p2pService.Digest, err = r.currentForkDigest()
require.NoError(t, err)
r.subscribe(topic, r.validateLightClientFinalityUpdate, func(ctx context.Context, msg proto.Message) error {
require.NoError(t, r.lightClientFinalityUpdateSubscriber(ctx, msg))
wg.Done()
return nil
}, p2pService.Digest)
r.markForChainStart()
l := util.NewTestLightClient(t, version.Altair, util.WithSupermajority(0))
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err, "Error generating light client finality update")
p2pService.ReceivePubSub(topic, update.Proto())
if util.WaitTimeout(&wg, time.Second) {
t.Fatal("Did not receive PubSub in 1 second")
}
u := r.lcStore.LastFinalityUpdate()
assert.DeepEqual(t, update.Proto(), u.Proto())
}

View File

@@ -26,13 +26,22 @@ func newSubTopicHandler() *subTopicHandler {
func (s *subTopicHandler) addTopic(topic string, sub *pubsub.Subscription) {
s.Lock()
defer s.Unlock()
// Check if this is updating a reserved entry (nil subscription)
existingSub, exists := s.subTopics[topic]
wasReserved := exists && existingSub == nil
s.subTopics[topic] = sub
digest, err := p2p.ExtractGossipDigest(topic)
if err != nil {
log.WithError(err).Error("Could not retrieve digest")
return
}
s.digestMap[digest] += 1
// Only increment digest count if this is a new topic (not just updating a reservation)
if !wasReserved {
s.digestMap[digest] += 1
}
}
func (s *subTopicHandler) topicExists(topic string) bool {
@@ -42,25 +51,57 @@ func (s *subTopicHandler) topicExists(topic string) bool {
return ok
}
// tryReserveTopic atomically checks if a topic exists and reserves it if not.
// Returns true if the topic was successfully reserved (didn't exist before),
// false if the topic already exists or is reserved.
// This prevents the race condition where multiple goroutines check topicExists()
// simultaneously and both proceed to subscribe.
func (s *subTopicHandler) tryReserveTopic(topic string) bool {
s.Lock()
defer s.Unlock()
// Check if topic already exists or is reserved
if _, exists := s.subTopics[topic]; exists {
return false
}
// Reserve the topic with a nil placeholder
// This will be updated with the actual subscription later
s.subTopics[topic] = nil
return true
}
func (s *subTopicHandler) removeTopic(topic string) {
s.Lock()
defer s.Unlock()
// Check if topic exists and whether it was just a reservation (nil)
existingSub, exists := s.subTopics[topic]
if !exists {
return
}
wasReserved := existingSub == nil
delete(s.subTopics, topic)
digest, err := p2p.ExtractGossipDigest(topic)
if err != nil {
log.WithError(err).Error("Could not retrieve digest")
return
}
currAmt, ok := s.digestMap[digest]
// This should never be possible; it is a defensive check.
if !ok || currAmt <= 0 {
delete(s.digestMap, digest)
return
}
s.digestMap[digest] -= 1
if s.digestMap[digest] == 0 {
delete(s.digestMap, digest)
// Only decrement digest count if this wasn't just a reservation
if !wasReserved {
digest, err := p2p.ExtractGossipDigest(topic)
if err != nil {
log.WithError(err).Error("Could not retrieve digest")
return
}
currAmt, ok := s.digestMap[digest]
// This should never be possible; it is a defensive check.
if !ok || currAmt <= 0 {
delete(s.digestMap, digest)
return
}
s.digestMap[digest] -= 1
if s.digestMap[digest] == 0 {
delete(s.digestMap, digest)
}
}
}
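
tryReserveTopic above closes the check-then-subscribe race by making the existence check and the reservation a single critical section. A standalone sketch of that pattern with a plain map and mutex (types and names are illustrative, not the subTopicHandler API):

package main

import (
	"fmt"
	"sync"
)

// topicRegistry reserves topics atomically: the first caller wins, later
// callers see the placeholder entry and back off.
type topicRegistry struct {
	mu     sync.Mutex
	topics map[string]bool // false = reserved placeholder, true = live subscription
}

func newTopicRegistry() *topicRegistry {
	return &topicRegistry{topics: make(map[string]bool)}
}

// tryReserve returns true only for the first caller per topic.
func (r *topicRegistry) tryReserve(topic string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, exists := r.topics[topic]; exists {
		return false
	}
	r.topics[topic] = false // placeholder until the real subscription is stored
	return true
}

// release drops a reservation or live entry, e.g. after a failed subscribe or
// when the message loop exits.
func (r *topicRegistry) release(topic string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.topics, topic)
}

func main() {
	reg := newTopicRegistry()
	var wg sync.WaitGroup
	var winsMu sync.Mutex
	wins := 0
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if reg.tryReserve("/eth2/voluntary_exit") {
				winsMu.Lock()
				wins++
				winsMu.Unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println("goroutines that won the reservation:", wins) // always 1
}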

View File

@@ -429,12 +429,12 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
svc := NewService(ctx, append(opts, tt.svcopts...)...)
markInitSyncComplete(t, svc)
svc, tt.args.topic = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
go svc.Start()
if tt.clock == nil {
tt.clock = startup.NewClock(time.Now(), [32]byte{})
}
require.NoError(t, cw.SetClock(tt.clock))
svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)
go svc.Start()
marshalledObj, err := tt.args.msg.MarshalSSZ()
assert.NoError(t, err)

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"time"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
@@ -13,7 +14,6 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)
func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
@@ -31,12 +31,6 @@ func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid p
_, span := trace.StartSpan(ctx, "sync.validateLightClientOptimisticUpdate")
defer span.End()
currentUpdate := s.lcStore.LastOptimisticUpdate()
if currentUpdate == nil {
log.Debug("No existing optimistic update to compare against. Ignoring.")
return pubsub.ValidationIgnore, nil
}
m, err := s.decodePubsubMessage(msg)
if err != nil {
tracing.AnnotateError(span, err)
@@ -70,12 +64,12 @@ func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid p
return pubsub.ValidationIgnore, nil
}
if !proto.Equal(newUpdate.Proto(), currentUpdate.Proto()) {
if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", newUpdate.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Received light client optimistic update is different from the local one. Ignoring.")
}).Debug("Newly received light client optimistic update ignored. current update is better.")
return pubsub.ValidationIgnore, nil
}
@@ -104,12 +98,6 @@ func (s *Service) validateLightClientFinalityUpdate(ctx context.Context, pid pee
_, span := trace.StartSpan(ctx, "sync.validateLightClientFinalityUpdate")
defer span.End()
currentUpdate := s.lcStore.LastFinalityUpdate()
if currentUpdate == nil {
log.Debug("No existing finality update to compare against. Ignoring.")
return pubsub.ValidationIgnore, nil
}
m, err := s.decodePubsubMessage(msg)
if err != nil {
tracing.AnnotateError(span, err)
@@ -143,12 +131,12 @@ func (s *Service) validateLightClientFinalityUpdate(ctx context.Context, pid pee
return pubsub.ValidationIgnore, nil
}
if !proto.Equal(newUpdate.Proto(), currentUpdate.Proto()) {
if !lightclient.IsFinalityUpdateValidForBroadcast(newUpdate, s.lcStore.LastFinalityUpdate()) {
log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", newUpdate.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Received light client finality update is different from the local one. ignoring.")
}).Debug("Newly received light client finality update ignored. current update is better.")
return pubsub.ValidationIgnore, nil
}
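
One side of the hunk above compares the incoming update against the stored one with an "is better" predicate rather than strict proto equality. Purely as an illustration of that shape (Prysm's actual IsBetterOptimisticUpdate / IsFinalityUpdateValidForBroadcast criteria live in the light-client package and may differ), a sketch assuming the rule "prefer the later attested slot, break ties by higher sync-committee participation":

package main

import "fmt"

// lcUpdate holds only the fields needed for this illustrative comparison; it
// is not Prysm's light-client type.
type lcUpdate struct {
	AttestedSlot  uint64
	Participation uint64 // number of sync-committee bits set
}

// isBetter accepts the first update unconditionally, then prefers later
// attested slots and, on ties, higher participation. Assumed ordering, for
// illustration only.
func isBetter(newU, oldU *lcUpdate) bool {
	if oldU == nil {
		return true
	}
	if newU.AttestedSlot != oldU.AttestedSlot {
		return newU.AttestedSlot > oldU.AttestedSlot
	}
	return newU.Participation > oldU.Participation
}

func main() {
	stored := &lcUpdate{AttestedSlot: 100, Participation: 400}
	incoming := &lcUpdate{AttestedSlot: 101, Participation: 380}
	fmt.Println(isBetter(incoming, stored)) // true: later attested slot wins
}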

View File

@@ -26,13 +26,9 @@ func TestValidateLightClientOptimisticUpdate_NilMessageOrTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}, lcStore: lcStore}
mockUpdate, err := util.MockOptimisticUpdate()
require.NoError(t, err)
s.lcStore.SetLastOptimisticUpdate(mockUpdate, false)
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
_, err = s.validateLightClientOptimisticUpdate(ctx, "", nil)
_, err := s.validateLightClientOptimisticUpdate(ctx, "", nil)
require.ErrorIs(t, err, errNilPubsubMessage)
_, err = s.validateLightClientOptimisticUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{}})
@@ -76,27 +72,27 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
name: "no previous update",
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
expectedResult: pubsub.ValidationAccept,
},
{
name: "not enough time passed",
genesisDrift: -secondsPerSlot / slotIntervals,
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update has no age advantage",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update is the same",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationAccept,
},
{
name: "new update is different",
name: "new update is better - younger",
genesisDrift: secondsPerSlot,
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
expectedResult: pubsub.ValidationIgnore,
expectedResult: pubsub.ValidationAccept,
},
}
@@ -153,13 +149,9 @@ func TestValidateLightClientFinalityUpdate_NilMessageOrTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}, lcStore: lcStore}
mockUpdate, err := util.MockFinalityUpdate()
require.NoError(t, err)
s.lcStore.SetLastFinalityUpdate(mockUpdate, false)
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
_, err = s.validateLightClientFinalityUpdate(ctx, "", nil)
_, err := s.validateLightClientFinalityUpdate(ctx, "", nil)
require.ErrorIs(t, err, errNilPubsubMessage)
_, err = s.validateLightClientFinalityUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{}})
@@ -203,26 +195,44 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
name: "no previous update",
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
expectedResult: pubsub.ValidationAccept,
},
{
name: "not enough time passed",
genesisDrift: -secondsPerSlot / slotIntervals,
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update has no advantage",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update is the same",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationAccept,
},
{
name: "new update is different",
name: "new update is better - age",
genesisDrift: secondsPerSlot,
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
expectedResult: pubsub.ValidationAccept,
},
{
name: "new update is better - supermajority",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{util.WithSupermajority(0)},
expectedResult: pubsub.ValidationAccept,
},
{
name: "old update is better - supermajority",
oldUpdateOptions: []util.LightClientOption{util.WithSupermajority(0)},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "old update is better - age",
oldUpdateOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
}

View File

@@ -410,9 +410,9 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
var clock *startup.Clock
svc, tt.args.topic, clock = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
markInitSyncComplete(t, svc)
go svc.Start()
require.NoError(t, cw.SetClock(clock))
svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)
go svc.Start()
marshalledObj, err := tt.args.msg.MarshalSSZ()
assert.NoError(t, err)

View File

@@ -2,7 +2,6 @@ package verification
import (
"context"
"crypto/sha256"
"fmt"
"strings"
"time"
@@ -525,39 +524,24 @@ func columnErrBuilder(baseErr error) error {
return errors.Wrap(baseErr, errColumnsInvalid.Error())
}
// inclusionProofKey computes a unique key based on the KZG commitments,
// the KZG commitments inclusion proof, and the signed block header root.
func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {
const (
commsIncProofLen = 4
commsIncProofByteCount = commsIncProofLen * 32
)
if len(c.KzgCommitmentsInclusionProof) != commsIncProofLen {
func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {
var key [160]byte
if len(c.KzgCommitmentsInclusionProof) != 4 {
// This should already be enforced by SSZ unmarshaling; still check so we don't panic on array bounds.
return [32]byte{}, columnErrBuilder(ErrSidecarInclusionProofInvalid)
return key, columnErrBuilder(ErrSidecarInclusionProofInvalid)
}
commsByteCount := len(c.KzgCommitments) * fieldparams.KzgCommitmentSize
unhashedKey := make([]byte, 0, commsIncProofByteCount+fieldparams.RootLength+commsByteCount)
// Include the commitments inclusion proof in the key.
for _, proof := range c.KzgCommitmentsInclusionProof {
unhashedKey = append(unhashedKey, proof...)
}
// Include the block root in the key.
root, err := c.SignedBlockHeader.HashTreeRoot()
if err != nil {
return [32]byte{}, columnErrBuilder(errors.Wrap(err, "hash tree root"))
return [160]byte{}, columnErrBuilder(errors.Wrap(err, "hash tree root"))
}
unhashedKey = append(unhashedKey, root[:]...)
// Include the commitments in the key.
for _, commitment := range c.KzgCommitments {
unhashedKey = append(unhashedKey, commitment...)
for i := range c.KzgCommitmentsInclusionProof {
if copy(key[32*i:32*i+32], c.KzgCommitmentsInclusionProof[i]) != 32 {
return key, columnErrBuilder(ErrSidecarInclusionProofInvalid)
}
}
return sha256.Sum256(unhashedKey), nil
copy(key[128:], root[:])
return key, nil
}
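
One side of the hunk above builds the cache key as a fixed [160]byte array: four 32-byte inclusion-proof nodes followed by the 32-byte signed-block-header root, with no hashing step. A standalone sketch of that packing (error handling and names are illustrative):

package main

import (
	"crypto/sha256"
	"fmt"
)

// buildInclusionProofKey packs 4 proof nodes of 32 bytes each into key[0:128]
// and the block root into key[128:160].
func buildInclusionProofKey(proof [][]byte, root [32]byte) ([160]byte, error) {
	var key [160]byte
	if len(proof) != 4 {
		return key, fmt.Errorf("expected 4 proof nodes, got %d", len(proof))
	}
	for i, node := range proof {
		if copy(key[32*i:32*(i+1)], node) != 32 {
			return key, fmt.Errorf("proof node %d is not 32 bytes", i)
		}
	}
	copy(key[128:], root[:])
	return key, nil
}

func main() {
	proof := make([][]byte, 4)
	for i := range proof {
		node := sha256.Sum256([]byte{byte(i)})
		proof[i] = node[:]
	}
	root := sha256.Sum256([]byte("signed block header"))
	key, err := buildInclusionProofKey(proof, root)
	fmt.Println(len(key), err) // 160 <nil>
}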

View File

@@ -1,4 +0,0 @@
### Changed
- Compare received LC messages over gossipsub with locally computed ones before forwarding. Also no longer save updates
from gossipsub, just validate and forward.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixing `Unsupported config field kind; value forwarded verbatim` errors for type string.

View File

@@ -1,3 +0,0 @@
### Fixed
- Adding improvements to getDuties v2: replaces the expensive helpers.PrecomputeCommittees() with CommitteeAssignments

View File

@@ -1,3 +0,0 @@
### Fixed
- fix /eth/v1/config/spec endpoint to properly skip omitted values.

View File

@@ -1,3 +0,0 @@
### Changed
- Add sources for compute_fork_digest to specrefs

View File

@@ -1,3 +0,0 @@
### Ignored
- `requestAndSaveMissingDataColumnSidecars`: Fix log

View File

@@ -1,3 +0,0 @@
### Fixed
- `inclusionProofKey`: Include the commitments in the key.

View File

@@ -1,2 +0,0 @@
### Changed
- `c-kzg-4844`: Update from `v2.1.1` to `v2.1.5`

View File

@@ -1,2 +0,0 @@
### Changed
- Aggregate logs when broadcasting data column sidecars (one per root instead of one per sidecar)

View File

@@ -1,2 +0,0 @@
### Ignored
- P2P service: Remove unused clock.

View File

@@ -1,4 +0,0 @@
### Fixed
- Fix ProduceSyncCommitteeContribution not returning an error when the committee index is out of range

View File

@@ -1,3 +0,0 @@
### Fixed
- Avoid unnecessary calls to `ExitInformation()`.

View File

@@ -1,3 +0,0 @@
### Ignored
- Fix races in tests that cause nil panics.

View File

@@ -1,3 +0,0 @@
### Changed
- Process pending attestations as soon as the block arrives.

View File

@@ -1,3 +0,0 @@
### Ignored
- Changed github action runners from `ubuntu-latest` to `ubuntu-4`

View File

@@ -1,3 +0,0 @@
### Changed
- Bazel builds with `--config=release` now properly apply `--strip=always` to strip debug symbols from the release assets.

View File

@@ -776,8 +776,8 @@ def prysm_deps():
importpath = "github.com/ethereum/c-kzg-4844/v2",
patch_args = ["-p1"],
patches = ["//third_party:com_github_ethereum_c_kzg_4844.patch"],
sum = "h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=",
version = "v2.1.5",
sum = "h1:KhzBVjmURsfr1+S3k/VE35T02+AW2qU9t9gr4R6YpSo=",
version = "v2.1.1",
)
go_repository(
name = "com_github_ethereum_go_ethereum",
@@ -3318,8 +3318,8 @@ def prysm_deps():
importpath = "github.com/supranational/blst",
patch_args = ["-p1"],
patches = ["//third_party:com_github_supranational_blst.patch"],
sum = "h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw=",
version = "v0.3.16-0.20250831170142-f48500c1fdbe",
sum = "h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo=",
version = "v0.3.14",
)
go_repository(
name = "com_github_syndtr_goleveldb",

4
go.mod
View File

@@ -14,7 +14,7 @@ require (
github.com/dgraph-io/ristretto/v2 v2.2.0
github.com/dustin/go-humanize v1.0.1
github.com/emicklei/dot v0.11.0
github.com/ethereum/c-kzg-4844/v2 v2.1.5
github.com/ethereum/c-kzg-4844/v2 v2.1.1
github.com/ethereum/go-ethereum v1.15.9
github.com/fsnotify/fsnotify v1.6.0
github.com/ghodss/yaml v1.0.0
@@ -70,7 +70,7 @@ require (
github.com/spf13/afero v1.10.0
github.com/status-im/keycard-go v0.2.0
github.com/stretchr/testify v1.10.0
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe
github.com/supranational/blst v0.3.14
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e
github.com/trailofbits/go-mutexasserts v0.0.0-20250212181730-4c2b8e9e784b
github.com/tyler-smith/go-bip39 v1.1.0

8
go.sum
View File

@@ -234,8 +234,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA=
github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/ethereum/c-kzg-4844/v2 v2.1.1 h1:KhzBVjmURsfr1+S3k/VE35T02+AW2qU9t9gr4R6YpSo=
github.com/ethereum/c-kzg-4844/v2 v2.1.1/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E=
github.com/ethereum/go-ethereum v1.15.9 h1:bRra1zi+/q+qyXZ6fylZOrlaF8kDdnlTtzNTmNHfX+g=
github.com/ethereum/go-ethereum v1.15.9/go.mod h1:+S9k+jFzlyVTNcYGvqFhzN/SFhI6vA+aOY4T5tLSPL0=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
@@ -1021,8 +1021,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe h1:nbdqkIGOGfUAD54q1s2YBcBz/WcsxCO9HUQ4aGV5hUw=
github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo=
github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=

View File

@@ -18,6 +18,7 @@ exceptions:
- UPDATE_TIMEOUT#altair
# Not implemented: gloas (future fork)
- KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GLOAS#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
@@ -214,6 +215,7 @@ exceptions:
# Not implemented: altair
- compute_sync_committee_period_at_slot#altair
- compute_timestamp_at_slot#bellatrix
- get_contribution_and_proof#altair
- get_contribution_due_ms#altair
- get_index_for_new_validator#altair
@@ -283,10 +285,12 @@ exceptions:
- upgrade_lc_update_to_electra#electra
# Not implemented: fulu
- compute_fork_digest#fulu
- compute_matrix#fulu
- get_blob_parameters#fulu
- get_data_column_sidecars_from_block#fulu
- get_data_column_sidecars_from_column_sidecar#fulu
- get_extended_sample_count#fulu
- recover_matrix#fulu
# Not implemented: gloas (future fork)

View File

@@ -1,10 +1,3 @@
- name: AGGREGATE_DUE_BPS
sources: []
spec: |
<spec config_var="AGGREGATE_DUE_BPS" fork="phase0" hash="7eaa811a">
AGGREGATE_DUE_BPS: uint64 = 6667
</spec>
- name: ALTAIR_FORK_EPOCH
sources:
- file: config/params/config.go
@@ -25,11 +18,11 @@
ALTAIR_FORK_VERSION: Version = '0x01000000'
</spec>
- name: ATTESTATION_DUE_BPS
- name: AGGREGATE_DUE_BPS
sources: []
spec: |
<spec config_var="ATTESTATION_DUE_BPS" fork="phase0" hash="929dd1c9">
ATTESTATION_DUE_BPS: uint64 = 3333
<spec config_var="AGGREGATE_DUE_BPS" fork="phase0" hash="7eaa811a">
AGGREGATE_DUE_BPS: uint64 = 6667
</spec>
- name: ATTESTATION_PROPAGATION_SLOT_RANGE
@@ -72,6 +65,13 @@
ATTESTATION_SUBNET_PREFIX_BITS: int = 6
</spec>
- name: ATTESTATION_DUE_BPS
sources: []
spec: |
<spec config_var="ATTESTATION_DUE_BPS" fork="phase0" hash="929dd1c9">
ATTESTATION_DUE_BPS: uint64 = 3333
</spec>
- name: BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
sources:
- file: config/params/config.go
@@ -163,13 +163,6 @@
CHURN_LIMIT_QUOTIENT: uint64 = 65536
</spec>
- name: CONTRIBUTION_DUE_BPS
sources: []
spec: |
<spec config_var="CONTRIBUTION_DUE_BPS" fork="altair" hash="a3808203">
CONTRIBUTION_DUE_BPS: uint64 = 6667
</spec>
- name: CUSTODY_REQUIREMENT
sources:
- file: config/params/config.go
@@ -540,13 +533,6 @@
NUMBER_OF_CUSTODY_GROUPS = 128
</spec>
- name: PROPOSER_REORG_CUTOFF_BPS
sources: []
spec: |
<spec config_var="PROPOSER_REORG_CUTOFF_BPS" fork="phase0" hash="a487cc43">
PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
</spec>
- name: PROPOSER_SCORE_BOOST
sources:
- file: config/params/config.go
@@ -557,6 +543,13 @@
PROPOSER_SCORE_BOOST: uint64 = 40
</spec>
- name: PROPOSER_REORG_CUTOFF_BPS
sources: []
spec: |
<spec config_var="PROPOSER_REORG_CUTOFF_BPS" fork="phase0" hash="a487cc43">
PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
</spec>
- name: REORG_HEAD_WEIGHT_THRESHOLD
sources:
- file: config/params/config.go
@@ -617,6 +610,13 @@
SECONDS_PER_SLOT: uint64 = 12
</spec>
- name: SLOT_DURATION_MS
sources: []
spec: |
<spec config_var="SLOT_DURATION_MS" fork="phase0" hash="b6d4ba6d">
SLOT_DURATION_MS: uint64 = 12000
</spec>
- name: SHARD_COMMITTEE_PERIOD
sources:
- file: config/params/config.go
@@ -627,13 +627,6 @@
SHARD_COMMITTEE_PERIOD: uint64 = 256
</spec>
- name: SLOT_DURATION_MS
sources: []
spec: |
<spec config_var="SLOT_DURATION_MS" fork="phase0" hash="b6d4ba6d">
SLOT_DURATION_MS: uint64 = 12000
</spec>
- name: SUBNETS_PER_NODE
sources:
- file: config/params/config.go
@@ -644,13 +637,6 @@
SUBNETS_PER_NODE = 2
</spec>
- name: SYNC_MESSAGE_DUE_BPS
sources: []
spec: |
<spec config_var="SYNC_MESSAGE_DUE_BPS" fork="altair" hash="791b29d8">
SYNC_MESSAGE_DUE_BPS: uint64 = 3333
</spec>
- name: TERMINAL_BLOCK_HASH
sources:
- file: config/params/config.go
@@ -690,3 +676,18 @@
<spec config_var="VALIDATOR_CUSTODY_REQUIREMENT" fork="fulu" hash="4dfc4457">
VALIDATOR_CUSTODY_REQUIREMENT = 8
</spec>
- name: CONTRIBUTION_DUE_BPS
sources: []
spec: |
<spec config_var="CONTRIBUTION_DUE_BPS" fork="altair" hash="a3808203">
CONTRIBUTION_DUE_BPS: uint64 = 6667
</spec>
- name: SYNC_MESSAGE_DUE_BPS
sources: []
spec: |
<spec config_var="SYNC_MESSAGE_DUE_BPS" fork="altair" hash="791b29d8">
SYNC_MESSAGE_DUE_BPS: uint64 = 3333
</spec>
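
The *_DUE_BPS values shuffled around in this file are basis-point fractions of SLOT_DURATION_MS (12000 ms). Assuming the standard basis-point denominator of 10000 (not shown in this hunk), the arithmetic reduces to a one-liner; a small Go sketch with the values from this file:

// Sketch of the basis-point deadline arithmetic behind the *_DUE_BPS values:
// duration_ms = bps * SLOT_DURATION_MS / BASIS_POINTS.
// BASIS_POINTS = 10000 is an assumption (the standard basis-point denominator).
package main

import "fmt"

const (
	slotDurationMS uint64 = 12000
	basisPoints    uint64 = 10000
)

func slotComponentDurationMS(bps uint64) uint64 {
	return bps * slotDurationMS / basisPoints
}

func main() {
	fmt.Println(slotComponentDurationMS(3333)) // ATTESTATION_DUE_BPS / SYNC_MESSAGE_DUE_BPS -> 3999 ms
	fmt.Println(slotComponentDurationMS(6667)) // AGGREGATE_DUE_BPS / CONTRIBUTION_DUE_BPS   -> 8000 ms
	fmt.Println(slotComponentDurationMS(1667)) // PROPOSER_REORG_CUTOFF_BPS                  -> 2000 ms
}

The get_slot_component_duration_ms spec helper relocated in the functions hunk further below performs exactly this integer division.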

View File

@@ -1,3 +1,4 @@
- name: BASE_REWARDS_PER_EPOCH
sources:
- file: config/params/config.go

View File

@@ -1,3 +1,4 @@
- name: BlobParameters
sources: []
spec: |

View File

@@ -527,11 +527,7 @@
</spec>
- name: compute_fork_digest#fulu
sources:
- file: config/params/fork.go
search: func ForkDigest(
- file: config/params/config.go
search: func entryWithForkDigest(
sources: []
spec: |
<spec fn="compute_fork_digest" fork="fulu" hash="e916a595">
def compute_fork_digest(
@@ -3144,17 +3140,6 @@
return hash(domain_type + uint_to_bytes(epoch) + mix)
</spec>
- name: get_slot_component_duration_ms
sources: []
spec: |
<spec fn="get_slot_component_duration_ms" fork="phase0" hash="b81504df">
def get_slot_component_duration_ms(basis_points: uint64) -> uint64:
"""
Calculate the duration of a slot component in milliseconds.
"""
return basis_points * SLOT_DURATION_MS // BASIS_POINTS
</spec>
- name: get_slot_signature
sources: []
spec: |
@@ -3175,6 +3160,17 @@
return (store.time - store.genesis_time) // SECONDS_PER_SLOT
</spec>
- name: get_slot_component_duration_ms
sources: []
spec: |
<spec fn="get_slot_component_duration_ms" fork="phase0" hash="b81504df">
def get_slot_component_duration_ms(basis_points: uint64) -> uint64:
"""
Calculate the duration of a slot component in milliseconds.
"""
return basis_points * SLOT_DURATION_MS // BASIS_POINTS
</spec>
- name: get_source_deltas
sources:
- file: beacon-chain/core/epoch/precompute/reward_penalty.go
@@ -7283,20 +7279,6 @@
return a - b if a > b else 0
</spec>
- name: seconds_to_milliseconds
sources: []
spec: |
<spec fn="seconds_to_milliseconds" fork="phase0" hash="b2cc9743">
def seconds_to_milliseconds(seconds: uint64) -> uint64:
"""
Convert seconds to milliseconds with overflow protection.
Returns ``UINT64_MAX`` if the result would overflow.
"""
if seconds > UINT64_MAX // 1000:
return UINT64_MAX
return seconds * 1000
</spec>
- name: set_or_append_list
sources: []
spec: |
@@ -7534,6 +7516,20 @@
assert block.state_root == hash_tree_root(state)
</spec>
- name: seconds_to_milliseconds
sources: []
spec: |
<spec fn="seconds_to_milliseconds" fork="phase0" hash="b2cc9743">
def seconds_to_milliseconds(seconds: uint64) -> uint64:
"""
Convert seconds to milliseconds with overflow protection.
Returns ``UINT64_MAX`` if the result would overflow.
"""
if seconds > UINT64_MAX // 1000:
return UINT64_MAX
return seconds * 1000
</spec>
- name: store_target_checkpoint_state
sources: []
spec: |

View File

@@ -230,7 +230,7 @@ func (v *ValidatorService) Start() {
distributed: v.distributed,
disableDutiesPolling: v.disableDutiesPolling,
accountsChangedChannel: make(chan [][fieldparams.BLSPubkeyLength]byte, 1),
eventsChannel: make(chan *eventClient.Event, 100), // 100 gives some room if the validator is slow at processing the events
eventsChannel: make(chan *eventClient.Event, 1),
}
hm := newHealthMonitor(v.ctx, v.cancel, v.maxHealthChecks, v.validator)