Compare commits

...

2 Commits

Author SHA1 Message Date
james-prysm
73e9d6e0ce adding cache for attestation data so we don't call it multiple times (#16236)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

Post-Electra, attestation data requests within the same slot return the same
data because the committee index is always 0, so we can save some API calls by
caching the attestation data per slot.
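
A minimal sketch of the per-slot caching idea, using simplified stand-in types rather than the real validator client interfaces (the actual change lives in `getAttestationData` further down this compare):

```go
package attcache

import (
	"context"
	"sync"
)

// Slot and AttestationData are simplified stand-ins for the real Prysm types.
type Slot uint64

type AttestationData struct {
	Slot Slot
}

// perSlotCache holds at most one attestation data entry at a time. Post-Electra
// the committee index is always 0, so a single entry per slot is sufficient.
type perSlotCache struct {
	mu    sync.RWMutex
	data  *AttestationData
	fetch func(ctx context.Context, slot Slot) (*AttestationData, error)
}

func (c *perSlotCache) get(ctx context.Context, slot Slot) (*AttestationData, error) {
	// Fast path: return the cached entry if it matches the requested slot.
	c.mu.RLock()
	if c.data != nil && c.data.Slot == slot {
		d := c.data
		c.mu.RUnlock()
		return d, nil
	}
	c.mu.RUnlock()

	// Slow path: fetch once, re-checking after taking the write lock so that
	// concurrent callers for the same slot trigger only a single request.
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.data != nil && c.data.Slot == slot {
		return c.data, nil
	}
	d, err := c.fetch(ctx, slot)
	if err != nil {
		return nil, err
	}
	c.data = d
	return d, nil
}
```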

**Which issue(s) does this PR fix?**

Fixes https://github.com/OffchainLabs/prysm/issues/16228

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-01-14 02:30:35 +00:00
Potuz
2e43d50364 Use dependent root when validating data column (#16250)
This PR uses the head state to validate data columns in more places than
we currently do. When the parent state is from the previous epoch and is
the head (for example at the first slot of an epoch), instead of replaying
slots and doing an epoch transition, we use the head state directly.

Another change is that, in the case of a head miss, instead of replaying up
to the parent state we only replay up to the target checkpoint state, which
is more likely to already be in the epoch boundary cache.
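
A rough sketch of the resulting selection order (illustrative names only, not the actual Prysm API; `sameDependentRoot` stands for "the column's parent and the head report the same dependent root for the column's previous epoch"):

```go
package verifysketch

// stateSource enumerates where the verifying state is taken from in this sketch.
type stateSource int

const (
	useHeadState        stateSource = iota // head state, read-only
	advanceHeadState                       // head state advanced via the next-slot cache
	useTargetCheckpoint                    // replay from the target checkpoint state
)

// chooseSource mirrors the decision order described above: prefer the head state
// when the dependent roots agree, advance the head when it is far behind, and
// otherwise fall back to the target checkpoint state, which is more likely to be
// in the epoch boundary cache than the parent state.
func chooseSource(columnEpoch, headEpoch uint64, sameDependentRoot bool) stateSource {
	if columnEpoch == 0 {
		return useHeadState
	}
	if sameDependentRoot {
		if headEpoch == columnEpoch || headEpoch+1 == columnEpoch {
			return useHeadState
		}
		if headEpoch+1 < columnEpoch {
			return advanceHeadState
		}
	}
	return useTargetCheckpoint
}
```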

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-13 18:19:41 +00:00
8 changed files with 554 additions and 136 deletions

View File

@@ -588,6 +588,12 @@ func fcReturnsTargetRoot(root [32]byte) func([32]byte, primitives.Epoch) ([32]by
}
}
func fcReturnsDependentRoot() func([32]byte, primitives.Epoch) ([32]byte, error) {
return func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return root, nil
}
}
type mockSignatureCache struct {
svCalledForSig map[signatureData]bool
svcb func(sig signatureData) (bool, error)

View File

@@ -1,7 +1,6 @@
package verification
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
@@ -19,6 +18,7 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
@@ -293,55 +293,57 @@ func (dv *RODataColumnsVerifier) ValidProposerSignature(ctx context.Context) (er
// The returned state is guaranteed to be at the same epoch as the data column's epoch, and have the same randao mix and active
// validator indices as the data column's parent state advanced to the data column's slot.
func (dv *RODataColumnsVerifier) getVerifyingState(ctx context.Context, dataColumn blocks.RODataColumn) (state.ReadOnlyBeaconState, error) {
dataColumnSlot := dataColumn.Slot()
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch == 0 {
return dv.hsp.HeadStateReadOnly(ctx)
}
parentRoot := dataColumn.ParentRoot()
dcDependentRoot, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch-1)
if err != nil {
return nil, err
}
headRoot, err := dv.hsp.HeadRoot(ctx)
if err != nil {
return nil, err
}
parentRoot := dataColumn.ParentRoot()
dataColumnSlot := dataColumn.Slot()
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
headSlot := dv.hsp.HeadSlot()
headEpoch := slots.ToEpoch(headSlot)
// Use head if it's the parent
if bytes.Equal(parentRoot[:], headRoot) {
// If they are in the same epoch, then we can return the head state directly
if dataColumnEpoch == headEpoch {
return dv.hsp.HeadStateReadOnly(ctx)
}
// Otherwise, we need to process the head state to the data column's slot
headState, err := dv.hsp.HeadState(ctx)
if err != nil {
return nil, err
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
}
// If head and data column are in the same epoch and head is compatible with the parent's dependent root, then use head
if dataColumnEpoch == headEpoch {
headDependent, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch)
if err != nil {
return nil, err
}
parentDependent, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return nil, err
}
if bytes.Equal(headDependent[:], parentDependent[:]) {
return dv.hsp.HeadStateReadOnly(ctx)
}
}
// Otherwise retrieve the parent state and advance it to the data column's slot
parentState, err := dv.sr.StateByRoot(ctx, parentRoot)
headDependentRoot, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch-1)
if err != nil {
return nil, err
}
parentEpoch := slots.ToEpoch(parentState.Slot())
if dataColumnEpoch == parentEpoch {
return parentState, nil
if dcDependentRoot == headDependentRoot {
headSlot := dv.hsp.HeadSlot()
headEpoch := slots.ToEpoch(headSlot)
if headEpoch == dataColumnEpoch || headEpoch == dataColumnEpoch-1 {
return dv.hsp.HeadStateReadOnly(ctx)
}
if headEpoch+1 < dataColumnEpoch {
headState, err := dv.hsp.HeadState(ctx)
if err != nil {
return nil, err
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
}
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], dataColumnSlot)
logrus.WithFields(logrus.Fields{
"slot": dataColumnSlot,
"parentRoot": fmt.Sprintf("%#x", parentRoot),
"headRoot": fmt.Sprintf("%#x", headRoot),
}).Debug("Replying state for data column verification")
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return nil, err
}
targetState, err := dv.sr.StateByRoot(ctx, targetRoot)
if err != nil {
return nil, err
}
targetEpoch := slots.ToEpoch(targetState.Slot())
if targetEpoch == dataColumnEpoch || targetEpoch == dataColumnEpoch-1 {
return targetState, nil
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, targetState, parentRoot[:], dataColumnSlot)
}
func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) (err error) {

View File

@@ -1,6 +1,7 @@
package verification
import (
"context"
"reflect"
"testing"
"time"
@@ -9,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -281,7 +283,7 @@ func TestColumnSlotAboveFinalized(t *testing.T) {
func TestValidProposerSignature(t *testing.T) {
const (
columnSlot = 0
columnSlot = 97
blobCount = 1
)
@@ -294,59 +296,83 @@ func TestValidProposerSignature(t *testing.T) {
// The signature data does not depend on the data column itself, so we can use the first one.
expectedSignatureData := columnToSignatureData(firstColumn)
// Create a proper Fulu state for verification.
// We need enough validators to cover the proposer index.
numValidators := max(uint64(firstColumn.ProposerIndex()+1), 64)
fuluState, _ := util.DeterministicGenesisStateFulu(t, numValidators)
// Head state provider that returns the fuluState via HeadStateReadOnly path.
headStateWithState := &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot,
headStateReadOnly: fuluState,
}
// Head state provider that will fail (headStateReadOnly is nil).
headStateNotFound := &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot,
}
testCases := []struct {
isError bool
vscbShouldError bool
svcbReturn bool
stateByRooter StateByRooter
vscbError error
svcbError error
name string
isError bool
vscbShouldError bool
svcbReturn bool
stateByRooter StateByRooter
headStateProvider *mockHeadStateProvider
vscbError error
svcbError error
name string
}{
{
name: "cache hit - success",
svcbReturn: true,
svcbError: nil,
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
isError: false,
name: "cache hit - success",
svcbReturn: true,
svcbError: nil,
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
headStateProvider: headStateWithState,
isError: false,
},
{
name: "cache hit - error",
svcbReturn: true,
svcbError: errors.New("derp"),
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
isError: true,
name: "cache hit - error",
svcbReturn: true,
svcbError: errors.New("derp"),
vscbShouldError: true,
vscbError: nil,
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
headStateProvider: headStateWithState,
isError: true,
},
{
name: "cache miss - success",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
isError: false,
name: "cache miss - success",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
headStateProvider: headStateWithState,
isError: false,
},
{
name: "cache miss - state not found",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrNotFound(t, expectedSignatureData.Parent),
isError: true,
name: "cache miss - state not found",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: nil,
stateByRooter: sbrNotFound(t, expectedSignatureData.Parent),
headStateProvider: headStateNotFound,
isError: true,
},
{
name: "cache miss - signature failure",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: errors.New("signature, not so good!"),
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
isError: true,
name: "cache miss - signature failure",
svcbReturn: false,
svcbError: nil,
vscbShouldError: false,
vscbError: errors.New("signature, not so good!"),
stateByRooter: sbrForValOverrideWithT(t, firstColumn.ProposerIndex(), validator),
headStateProvider: headStateWithState,
isError: true,
},
}
@@ -377,9 +403,10 @@ func TestValidProposerSignature(t *testing.T) {
shared: &sharedResources{
sc: signatureCache,
sr: tc.stateByRooter,
hsp: &mockHeadStateProvider{},
hsp: tc.headStateProvider,
fc: &mockForkchoicer{
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
DependentRootForEpochCB: fcReturnsDependentRoot(),
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
@@ -405,7 +432,7 @@ func TestValidProposerSignature(t *testing.T) {
func TestDataColumnsSidecarParentSeen(t *testing.T) {
const (
columnSlot = 0
columnSlot = 97
blobCount = 1
)
@@ -509,7 +536,7 @@ func TestDataColumnsSidecarParentValid(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 0
columnSlot = 97
blobCount = 1
)
@@ -630,7 +657,7 @@ func TestDataColumnsSidecarDescendsFromFinalized(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 0
columnSlot = 97
blobCount = 1
)
@@ -693,7 +720,7 @@ func TestDataColumnsSidecarInclusionProven(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 0
columnSlot = 97
blobCount = 1
)
@@ -748,7 +775,7 @@ func TestDataColumnsSidecarKzgProofVerified(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
const (
columnSlot = 0
columnSlot = 97
blobCount = 1
)
@@ -924,3 +951,135 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
require.NoError(t, err)
}
func TestGetVerifyingStateEdgeCases(t *testing.T) {
const (
columnSlot = 97 // epoch 3
blobCount = 1
)
parentRoot := [fieldparams.RootLength]byte{}
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
// Create a proper Fulu state for verification.
numValidators := max(uint64(columns[0].ProposerIndex()+1), 64)
fuluState, _ := util.DeterministicGenesisStateFulu(t, numValidators)
t.Run("different dependent roots - uses StateByRoot path", func(t *testing.T) {
// Parent and head are on different forks with different dependent roots.
// This forces the code to use TargetRootForEpoch -> StateByRoot path.
signatureCache := &mockSignatureCache{
svcb: func(signatureData signatureData) (bool, error) {
return false, nil // Cache miss
},
vscb: func(signatureData signatureData, _ validatorAtIndexer) (err error) {
return nil // Signature valid
},
}
// StateByRoot will be called because dependent roots differ
stateByRootCalled := false
stateByRooter := &mockStateByRooter{
sbr: func(_ context.Context, root [32]byte) (state.BeaconState, error) {
stateByRootCalled = true
return fuluState, nil
},
}
initializer := Initializer{
shared: &sharedResources{
sc: signatureCache,
sr: stateByRooter,
hsp: &mockHeadStateProvider{
headRoot: []byte{0xff}, // Different from parentRoot
headSlot: columnSlot,
},
fc: &mockForkchoicer{
// Return different roots for parent vs head to simulate different forks
DependentRootForEpochCB: func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return root, nil // Returns input, so parent [0...] != head [0xff...]
},
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.ValidProposerSignature(t.Context())
require.NoError(t, err)
require.Equal(t, true, stateByRootCalled, "StateByRoot should be called when dependent roots differ")
})
t.Run("same dependent root head far ahead - uses head state with ProcessSlots", func(t *testing.T) {
// Parent is ancestor of head on same chain, but head is in epoch 1 while column is in epoch 3.
// headEpoch (1) + 1 < dataColumnEpoch (3), so ProcessSlots is called on head state.
signatureCache := &mockSignatureCache{
svcb: func(signatureData signatureData) (bool, error) {
return false, nil // Cache miss
},
vscb: func(signatureData signatureData, _ validatorAtIndexer) (err error) {
return nil // Signature valid
},
}
headStateCalled := false
initializer := Initializer{
shared: &sharedResources{
sc: signatureCache,
sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:], // Same as parent
headSlot: 32, // Epoch 1
headState: fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
headStateReadOnly: nil, // Should not use ReadOnly path
},
fc: &mockForkchoicer{
// Return same root for both to simulate same chain
DependentRootForEpochCB: func(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return [32]byte{0xaa}, nil // Same for all inputs
},
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
// Wrap to detect HeadState call
originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
wrappedHsp := &mockHeadStateProvider{
headRoot: originalHsp.headRoot,
headSlot: originalHsp.headSlot,
headState: originalHsp.headState,
}
initializer.shared.hsp = &headStateCallTracker{
mockHeadStateProvider: wrappedHsp,
headStateCalled: &headStateCalled,
}
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.ValidProposerSignature(t.Context())
require.NoError(t, err)
require.Equal(t, true, headStateCalled, "HeadState should be called when head is far ahead")
})
}
// headStateCallTracker wraps mockHeadStateProvider to track HeadState calls.
type headStateCallTracker struct {
*mockHeadStateProvider
headStateCalled *bool
}
func (h *headStateCallTracker) HeadState(ctx context.Context) (state.BeaconState, error) {
*h.headStateCalled = true
return h.mockHeadStateProvider.HeadState(ctx)
}
func (h *headStateCallTracker) HeadRoot(ctx context.Context) ([]byte, error) {
return h.mockHeadStateProvider.HeadRoot(ctx)
}
func (h *headStateCallTracker) HeadSlot() primitives.Slot {
return h.mockHeadStateProvider.HeadSlot()
}
func (h *headStateCallTracker) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
return h.mockHeadStateProvider.HeadStateReadOnly(ctx)
}

View File

@@ -0,0 +1,3 @@
### Changed
- Post-Electra, we now request attestation data once per slot and serve subsequent requests from a cache.

View File

@@ -0,0 +1,2 @@
### Changed
- Use dependent root and target root to verify data column proposer index.

View File

@@ -71,17 +71,9 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot primitives.Slot,
return
}
committeeIndex := duty.CommitteeIndex
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
if postElectra {
committeeIndex = 0
}
req := &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
}
data, err := v.validatorClient.AttestationData(ctx, req)
data, err := v.getAttestationData(ctx, slot, duty.CommitteeIndex)
if err != nil {
log.WithError(err).Error("Could not request attestation to sign at slot")
if v.emitAccountMetrics {

View File

@@ -65,57 +65,59 @@ var (
)
type validator struct {
duties *ethpb.ValidatorDutiesContainer
ticker slots.Ticker
genesisTime time.Time
logValidatorPerformance bool
distributed bool
enableAPI bool
disableDutiesPolling bool
emitAccountMetrics bool
aggregatedSlotCommitteeIDCacheLock sync.Mutex
attLogsLock sync.Mutex
attSelectionLock sync.Mutex
highestValidSlotLock sync.Mutex
domainDataLock sync.RWMutex
blacklistedPubkeysLock sync.RWMutex
prevEpochBalancesLock sync.RWMutex
cachedAttestationDataLock sync.RWMutex
dutiesLock sync.RWMutex
cachedAttestationData *ethpb.AttestationData
accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
eventsChannel chan *eventClient.Event
highestValidSlot primitives.Slot
submittedAggregates map[submittedAttKey]*submittedAtt
graffitiStruct *graffiti.Graffiti
syncCommitteeStats syncCommitteeStats
slotFeed *event.Feed
domainDataCache *ristretto.Cache[string, proto.Message]
aggregatedSlotCommitteeIDCache *lru.Cache
attSelections map[attSelectionKey]iface.BeaconCommitteeSelection
interopKeysConfig *local.InteropKeymanagerConfig
duties *ethpb.ValidatorDutiesContainer
signedValidatorRegistrations map[[fieldparams.BLSPubkeyLength]byte]*ethpb.SignedValidatorRegistrationV1
proposerSettings *proposer.Settings
web3SignerConfig *remoteweb3signer.SetupConfig
startBalances map[[fieldparams.BLSPubkeyLength]byte]uint64
prevEpochBalances map[[fieldparams.BLSPubkeyLength]byte]uint64
blacklistedPubkeys map[[fieldparams.BLSPubkeyLength]byte]bool
pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus
wallet *wallet.Wallet
walletInitializedChan chan *wallet.Wallet
walletInitializedFeed *event.Feed
graffiti []byte
graffitiStruct *graffiti.Graffiti
graffitiOrderedIndex uint64
beaconNodeHosts []string
currentHostIndex uint64
walletInitializedFeed *event.Feed
graffitiOrderedIndex uint64
submittedAtts map[submittedAttKey]*submittedAtt
validatorsRegBatchSize int
validatorClient iface.ValidatorClient
chainClient iface.ChainClient
nodeClient iface.NodeClient
prysmChainClient iface.PrysmChainClient
db db.Database
km keymanager.IKeymanager
web3SignerConfig *remoteweb3signer.SetupConfig
proposerSettings *proposer.Settings
signedValidatorRegistrations map[[fieldparams.BLSPubkeyLength]byte]*ethpb.SignedValidatorRegistrationV1
validatorsRegBatchSize int
interopKeysConfig *local.InteropKeymanagerConfig
attSelections map[attSelectionKey]iface.BeaconCommitteeSelection
aggregatedSlotCommitteeIDCache *lru.Cache
domainDataCache *ristretto.Cache[string, proto.Message]
voteStats voteStats
syncCommitteeStats syncCommitteeStats
submittedAtts map[submittedAttKey]*submittedAtt
submittedAggregates map[submittedAttKey]*submittedAtt
logValidatorPerformance bool
emitAccountMetrics bool
enableAPI bool
distributed bool
domainDataLock sync.RWMutex
attLogsLock sync.Mutex
aggregatedSlotCommitteeIDCacheLock sync.Mutex
highestValidSlotLock sync.Mutex
prevEpochBalancesLock sync.RWMutex
blacklistedPubkeysLock sync.RWMutex
attSelectionLock sync.Mutex
dutiesLock sync.RWMutex
disableDutiesPolling bool
accountsChangedChannel chan [][fieldparams.BLSPubkeyLength]byte
eventsChannel chan *eventClient.Event
accountChangedSub event.Subscription
ticker slots.Ticker
beaconNodeHosts []string
genesisTime time.Time
graffiti []byte
voteStats voteStats
}
type validatorStatus struct {
@@ -977,6 +979,54 @@ func (v *validator) domainData(ctx context.Context, epoch primitives.Epoch, doma
return res, nil
}
// getAttestationData fetches attestation data from the beacon node with caching for post-Electra.
// Post-Electra, attestation data is identical for all validators in the same slot (committee index is always 0),
// so we cache it to avoid redundant beacon node requests.
func (v *validator) getAttestationData(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) (*ethpb.AttestationData, error) {
ctx, span := trace.StartSpan(ctx, "validator.getAttestationData")
defer span.End()
postElectra := slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch
// Pre-Electra: no caching since committee index varies per validator
if !postElectra {
return v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: committeeIndex,
})
}
// Post-Electra: check cache first (committee index is always 0)
v.cachedAttestationDataLock.RLock()
if v.cachedAttestationData != nil && v.cachedAttestationData.Slot == slot {
data := v.cachedAttestationData
v.cachedAttestationDataLock.RUnlock()
return data, nil
}
v.cachedAttestationDataLock.RUnlock()
// Cache miss - acquire write lock and fetch
v.cachedAttestationDataLock.Lock()
defer v.cachedAttestationDataLock.Unlock()
// Double-check after acquiring write lock (another goroutine may have filled the cache)
if v.cachedAttestationData != nil && v.cachedAttestationData.Slot == slot {
return v.cachedAttestationData, nil
}
data, err := v.validatorClient.AttestationData(ctx, &ethpb.AttestationDataRequest{
Slot: slot,
CommitteeIndex: 0,
})
if err != nil {
return nil, err
}
v.cachedAttestationData = data
return data, nil
}
func (v *validator) logDuties(slot primitives.Slot, currentEpochDuties []*ethpb.ValidatorDuty, nextEpochDuties []*ethpb.ValidatorDuty) {
attesterKeys := make([][]string, params.BeaconConfig().SlotsPerEpoch)
for i := range attesterKeys {

View File

@@ -2977,3 +2977,207 @@ func TestValidator_CheckDependentRoots(t *testing.T) {
require.NoError(t, v.checkDependentRoots(ctx, head))
})
}
func TestGetAttestationData_PreElectraNoCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
// Pre-Electra slot (Electra fork epoch is far in the future by default)
preElectraSlot := primitives.Slot(10)
expectedData := &ethpb.AttestationData{
Slot: preElectraSlot,
CommitteeIndex: 5,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Each call should go to the beacon node (no caching pre-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 5,
}).Return(expectedData, nil)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: preElectraSlot,
CommitteeIndex: 7,
}).Return(expectedData, nil)
// First call with committee index 5
data1, err := v.getAttestationData(context.Background(), preElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)
// Second call with different committee index 7 - should still call beacon node
data2, err := v.getAttestationData(context.Background(), preElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
}
func TestGetAttestationData_PostElectraCaching(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
// Post-Electra slot
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Only ONE call should go to the beacon node (caching post-Electra)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)
// First call - should hit beacon node
data1, err := v.getAttestationData(context.Background(), postElectraSlot, 5)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data1)
// Second call with different committee index - should use cache
data2, err := v.getAttestationData(context.Background(), postElectraSlot, 7)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data2)
// Third call - should still use cache
data3, err := v.getAttestationData(context.Background(), postElectraSlot, 10)
require.NoError(t, err)
require.DeepEqual(t, expectedData, data3)
}
func TestGetAttestationData_PostElectraCacheInvalidatesOnNewSlot(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
slot1 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
slot2 := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 6)
dataSlot1 := &ethpb.AttestationData{
Slot: slot1,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
dataSlot2 := &ethpb.AttestationData{
Slot: slot2,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Expect one call per slot
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot1,
CommitteeIndex: 0,
}).Return(dataSlot1, nil).Times(1)
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: slot2,
CommitteeIndex: 0,
}).Return(dataSlot2, nil).Times(1)
// First slot - should hit beacon node
data1, err := v.getAttestationData(context.Background(), slot1, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1)
// Same slot - should use cache
data1Again, err := v.getAttestationData(context.Background(), slot1, 7)
require.NoError(t, err)
require.DeepEqual(t, dataSlot1, data1Again)
// New slot - should invalidate cache and hit beacon node
data2, err := v.getAttestationData(context.Background(), slot2, 5)
require.NoError(t, err)
require.DeepEqual(t, dataSlot2, data2)
}
func TestGetAttestationData_PostElectraConcurrentAccess(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// Set up Electra fork epoch for this test
cfg := params.BeaconConfig().Copy()
originalElectraForkEpoch := cfg.ElectraForkEpoch
cfg.ElectraForkEpoch = 1
params.OverrideBeaconConfig(cfg)
defer func() {
cfg.ElectraForkEpoch = originalElectraForkEpoch
params.OverrideBeaconConfig(cfg)
}()
client := validatormock.NewMockValidatorClient(ctrl)
v := &validator{validatorClient: client}
postElectraSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch + 5)
expectedData := &ethpb.AttestationData{
Slot: postElectraSlot,
CommitteeIndex: 0,
BeaconBlockRoot: bytesutil.PadTo([]byte("root"), 32),
Source: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte("source"), 32)},
Target: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte("target"), 32)},
}
// Should only call beacon node once despite concurrent requests
client.EXPECT().AttestationData(gomock.Any(), &ethpb.AttestationDataRequest{
Slot: postElectraSlot,
CommitteeIndex: 0,
}).Return(expectedData, nil).Times(1)
var wg sync.WaitGroup
numGoroutines := 10
results := make([]*ethpb.AttestationData, numGoroutines)
errs := make([]error, numGoroutines)
for i := range numGoroutines {
wg.Add(1)
go func(idx int) {
defer wg.Done()
results[idx], errs[idx] = v.getAttestationData(context.Background(), postElectraSlot, primitives.CommitteeIndex(idx))
}(i)
}
wg.Wait()
for i := range numGoroutines {
require.NoError(t, errs[i])
require.DeepEqual(t, expectedData, results[i])
}
}