Compare commits

...

6 Commits

Author SHA1 Message Date
james-prysm
1dbdb1a8e9 reducing e2e eth1lookahead 2025-12-08 15:10:31 -06:00
Potuz
d20ec4c7a1 Track the dependent root of the latest finalized checkpoint (#16103)
This PR adds the dependent root of the latest finalized checkpoint to
forkchoice, since the corresponding node will typically be pruned upon finalization.
2025-12-08 16:16:32 +00:00
terence
7a70abbd15 Add --ignore-unviable-attestations and deprecate --disable-last-epoch-targets (#16094)
This PR introduces flag `--ignore-unviable-attestations` (replaces and
deprecates `--disable-last-epoch-targets`) to drop attestations whose
target state is not viable; by default they are still processed unless
the flag is explicitly enabled.
2025-12-05 15:03:04 +00:00
Potuz
a2b84c9320 Use head state in more cases (#16095)
In some cases the head state is guaranteed to have the same shuffling and
active indices as the target checkpoint's state, namely when its previous
dependent root coincides with the target checkpoint's.
2025-12-05 03:44:03 +00:00
terence
edef17e41d Add arrival latency tracking for data column sidecars (#16099)
We have this for blob sidecars but not for data columns.
2025-12-04 21:28:02 +00:00
Manu NALEPA
85c5d31b5b blobsDataFromStoredDataColumns: Ask the user to use the --supernode flag and shorten the error message. (#16097)
2025-12-04 15:54:13 +00:00
20 changed files with 201 additions and 27 deletions

View File

@@ -22,10 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -33,11 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}
@@ -53,7 +50,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
return st
}
// Otherwise we need to advance the head state to the start of the target epoch.
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
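The hunk above is the core of PR #16095: rather than requiring the checkpoint root to be canonical up front, the head state is reused whenever head and target agree on the dependent root of epoch `c.Epoch-1`. A minimal sketch of the idea with toy types (not Prysm's forkchoice API); here `dependentRoot` returns the last block strictly before an epoch's start slot, the same semantics the store tests below exercise:

```go
package main

import "fmt"

const slotsPerEpoch = 32

type node struct {
	slot   uint64
	root   string
	parent *node
}

// dependentRoot returns the root of the last block strictly before the
// given epoch's start slot; that block pins down the shuffling and active
// indices derived from it.
func dependentRoot(n *node, epoch uint64) string {
	start := epoch * slotsPerEpoch
	for n != nil && n.slot >= start {
		n = n.parent
	}
	if n == nil {
		return "genesis"
	}
	return n.root
}

func main() {
	// Fork mirroring the tests below: 31 <- 32 <- 64, with a branch 31 <- 33.
	a := &node{slot: 31, root: "a"}
	b := &node{slot: 32, root: "b", parent: a}
	head := &node{slot: 64, root: "head", parent: b}
	target := &node{slot: 33, root: "target", parent: a}
	// For a checkpoint at epoch 2 the code above compares the dependent
	// roots of epoch 1: both branches resolve to the slot-31 block, so the
	// head state's shuffling is valid for the target and no replay is needed.
	fmt.Println(dependentRoot(head, 1), dependentRoot(target, 1)) // a a
}
```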

View File

@@ -181,6 +181,123 @@ func TestService_GetRecentPreState(t *testing.T) {
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 30 <-- 31 <-- 32 <--- 64
// \---------33
// With different dependent roots at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()

View File

@@ -240,7 +240,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
if node.slot == epochStart {
return true, nil
}
if !features.Get().DisableLastEpochTargets {
if !features.Get().IgnoreUnviableAttestations {
// Allow any node from the checkpoint epoch - 1 to be viable.
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch+1 == cp.Epoch {
@@ -642,8 +642,12 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
if !ok || node == nil {
return [32]byte{}, ErrNilNode
}
if slots.ToEpoch(node.slot) >= epoch && node.parent != nil {
node = node.parent
if slots.ToEpoch(node.slot) >= epoch {
if node.parent != nil {
node = node.parent
} else {
return f.store.finalizedDependentRoot, nil
}
}
return node.root, nil
}
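For orientation, the first hunk above gates the legacy viability rule behind the renamed feature flag. A hedged simplification (illustrative names, not Prysm's types): a node sitting exactly at the checkpoint's epoch start is always viable; with the flag off, nodes from the previous epoch are also accepted, which forces their state to be replayed forward.

```go
package main

import "fmt"

const slotsPerEpoch = 32

func epochOf(slot uint64) uint64 { return slot / slotsPerEpoch }

// viableForCheckpoint sketches the gate above: exact epoch-start nodes are
// always viable; the legacy path (flag off) additionally accepts nodes from
// checkpoint epoch - 1, at the cost of an expensive state replay.
func viableForCheckpoint(nodeSlot, epochStart, cpEpoch uint64, ignoreUnviable bool) bool {
	if nodeSlot == epochStart {
		return true
	}
	if !ignoreUnviable && epochOf(nodeSlot)+1 == cpEpoch {
		return true
	}
	return false
}

func main() {
	// Node at slot 63 (epoch 1) as target for a checkpoint at epoch 2 (start slot 64):
	fmt.Println(viableForCheckpoint(63, 64, 2, false)) // true: legacy behavior
	fmt.Println(viableForCheckpoint(63, 64, 2, true))  // false: flag enabled, attestation dropped
}
```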

View File

@@ -212,6 +212,9 @@ func (s *Store) prune(ctx context.Context) error {
return nil
}
// Save the new finalized dependent root because it will be pruned
s.finalizedDependentRoot = finalizedNode.parent.root
// Prune nodeByRoot starting from root
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
return err
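This `prune` hunk caches the root of the finalized node's parent just before it is discarded; paired with the new `else` branch in `DependentRootForEpoch` above, a lookup that walks past the finalized node resolves to the cached value instead of failing. A compact sketch of the pattern (toy store, not the real one):

```go
package main

import "fmt"

type node struct {
	root   string
	parent *node
}

type store struct {
	finalizedDependentRoot string // cached before the parent is pruned away
}

// prune detaches everything behind the newly finalized node, remembering
// the dependent root that its pruned parent carried.
func (s *store) prune(finalized *node) {
	s.finalizedDependentRoot = finalized.parent.root
	finalized.parent = nil
}

// dependentRootOf steps to the parent while one exists; once pruning has
// cut the chain, it falls back to the cached finalized dependent root.
func (s *store) dependentRootOf(n *node) string {
	if n.parent != nil {
		return n.parent.root
	}
	return s.finalizedDependentRoot
}

func main() {
	a := &node{root: "a"}
	b := &node{root: "b", parent: a}
	s := &store{}
	fmt.Println(s.dependentRootOf(b)) // "a" via the live parent
	s.prune(b)
	fmt.Println(s.dependentRootOf(b)) // still "a", now via the cache
}
```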

View File

@@ -465,6 +465,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
// Insert a block in slot 32
state, blk, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
@@ -475,6 +476,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, [32]byte{})
// Insert a block in slot 33
state, blk1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blk.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk1))
@@ -488,7 +490,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, [32]byte{})
// Insert a block for the next epoch (missed slot 0)
// Insert a block for the next epoch (missed slot 0), slot 65
state, blk2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
@@ -509,6 +511,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, blk1.Root())
// Insert a block at slot 66
state, blk3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, blk2.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk3))
@@ -533,8 +536,11 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
dependent, err = f.DependentRoot(1)
require.NoError(t, err)
require.Equal(t, [32]byte{}, dependent)
dependent, err = f.DependentRoot(2)
require.NoError(t, err)
require.Equal(t, blk1.Root(), dependent)
// Insert a block for next epoch (slot 0 present)
// Insert a block for the next epoch, slot 96 (descends from finalized at slot 33)
state, blk4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk4))
@@ -551,6 +557,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, blk1.Root())
// Insert a block at slot 97
state, blk5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, blk4.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk5))
@@ -600,12 +607,16 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, target, blk1.Root())
// Prune finalization
// Prune finalization, finalize the block at slot 96
s.finalizedCheckpoint.Root = blk4.Root()
require.NoError(t, s.prune(ctx))
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
require.NoError(t, err)
require.Equal(t, blk4.Root(), target)
// Dependent root for the finalized block should be the root of the pruned block at slot 33
dependent, err = f.DependentRootForEpoch(blk4.Root(), 3)
require.NoError(t, err)
require.Equal(t, blk1.Root(), dependent)
}
func TestStore_DependentRootForEpoch(t *testing.T) {

View File

@@ -31,6 +31,7 @@ type Store struct {
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node

View File

@@ -450,7 +450,7 @@ func (p *BeaconDbBlocker) blobsDataFromStoredDataColumns(root [fieldparams.RootL
if count < peerdas.MinimumColumnCountToReconstruct() {
// There is no way to reconstruct the data columns.
return nil, &core.RpcError{
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed, or retry later if it is already the case", flags.Supernode.Name),
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds", flags.SemiSupernode.Name),
Reason: core.NotFound,
}
}

View File

@@ -157,6 +157,12 @@ var (
Help: "Time for gossiped blob sidecars to arrive",
},
)
dataColumnSidecarArrivalGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_data_column_sidecar_arrival_milliseconds",
Help: "Time for gossiped data column sidecars to arrive",
},
)
blobSidecarVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_blob_sidecar_verification_milliseconds",

View File

@@ -191,6 +191,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
sinceSlotStartTime := receivedTime.Sub(startTime)
validationTime := s.cfg.clock.Now().Sub(receivedTime)
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)
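These two hunks register the summary and record arrival time relative to the slot start, mirroring the existing blob-sidecar metric. A self-contained sketch of the same pattern using the standard Prometheus Go client (the slot-start timestamp below is fabricated for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var arrivalSummary = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "gossip_data_column_sidecar_arrival_milliseconds",
	Help: "Time for gossiped data column sidecars to arrive",
})

func main() {
	slotStart := time.Now().Add(-150 * time.Millisecond) // pretend the slot began 150ms ago
	received := time.Now()
	sinceSlotStart := received.Sub(slotStart)
	arrivalSummary.Observe(float64(sinceSlotStart.Milliseconds()))
	fmt.Printf("observed %dms since slot start\n", sinceSlotStart.Milliseconds())
}
```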

View File

@@ -0,0 +1,3 @@
### Changed
- Reduced the e2e eth1 follow distance from 8 to 1 since e2e only supports post-merge (Bellatrix); this removes the error "Beacon node is not respecting the follow distance. EL client is syncing."

View File

@@ -0,0 +1,3 @@
### Changed
- `blobsDataFromStoredDataColumns`: Ask the user to use the `--supernode` flag and shorten the error message.

View File

@@ -0,0 +1,3 @@
### Added
- Track the dependent root of the latest finalized checkpoint in forkchoice.

View File

@@ -0,0 +1,3 @@
### Fixed
- Use head state to validate attestations for old blocks if they are compatible.

View File

@@ -0,0 +1,2 @@
### Added
- Prometheus summary `gossip_data_column_sidecar_arrival_milliseconds` to track data column sidecar arrival latency since slot start.

View File

@@ -0,0 +1,2 @@
### Changed
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; by default they are still processed unless the flag is explicitly enabled.

View File

@@ -71,7 +71,7 @@ type Flags struct {
DisableResourceManager bool // Disables running the node with libp2p's resource manager.
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
DisableLastEpochTargets bool // Disables processing of states for attestations to old blocks.
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
@@ -281,9 +281,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(blacklistRoots)
cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
}
if ctx.IsSet(disableLastEpochTargets.Name) {
logEnabled(disableLastEpochTargets)
cfg.DisableLastEpochTargets = true
cfg.IgnoreUnviableAttestations = false
if ctx.IsSet(ignoreUnviableAttestations.Name) && ctx.Bool(ignoreUnviableAttestations.Name) {
logEnabled(ignoreUnviableAttestations)
cfg.IgnoreUnviableAttestations = true
}
if ctx.IsSet(EnableStateDiff.Name) {

View File

@@ -25,4 +25,6 @@ var upcomingDeprecation = []cli.Flag{
// deprecatedBeaconFlags contains flags that are still used by other components
// and therefore cannot be added to deprecatedFlags
var deprecatedBeaconFlags = []cli.Flag{}
var deprecatedBeaconFlags = []cli.Flag{
deprecatedDisableLastEpochTargets,
}

View File

@@ -201,10 +201,15 @@ var (
Usage: "(Work in progress): Enables the web portal for the validator client.",
Value: false,
}
// disableLastEpochTargets is a flag to disable processing of attestations for old blocks.
disableLastEpochTargets = &cli.BoolFlag{
// deprecatedDisableLastEpochTargets is a flag to disable processing of attestations for old blocks.
deprecatedDisableLastEpochTargets = &cli.BoolFlag{
Name: "disable-last-epoch-targets",
Usage: "Disables processing of last epoch targets.",
Usage: "Deprecated: disables processing of last epoch targets.",
}
// ignoreUnviableAttestations flag to skip attestations whose target state is not viable with respect to head (from lagging nodes).
ignoreUnviableAttestations = &cli.BoolFlag{
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
)
@@ -251,6 +256,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
disableStakinContractCheck,
SaveFullExecutionPayloads,
enableStartupOptimistic,
ignoreUnviableAttestations,
enableFullSSZDataLogging,
disableVerboseSigVerification,
prepareAllPayloads,
@@ -266,7 +272,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
disableLastEpochTargets,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {
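Net effect for node operators (a hedged reading of the flag hunks above): `--disable-last-epoch-targets` stays registered via `deprecatedBeaconFlags`, so existing command lines keep parsing, but it no longer toggles anything in `ConfigureBeaconChain`; the drop-unviable behavior is strictly opt-in by starting the beacon node with `--ignore-unviable-attestations`.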

View File

@@ -64,8 +64,9 @@ SECONDS_PER_ETH1_BLOCK: 2 # Override for e2e tests
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 1
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
SHARD_COMMITTEE_PERIOD: 4 # Override for e2e tests
# [customized] process deposits more quickly, but insecure
ETH1_FOLLOW_DISTANCE: 8 # Override for e2e tests
# [customized] Post-merge, the follow distance is less critical since finality is guaranteed by the beacon chain.
# Setting to 1 avoids timing issues during e2e startup when the EL has few blocks.
ETH1_FOLLOW_DISTANCE: 1 # Override for e2e tests
# Validator cycle
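A quick sanity check on what the override buys under this file's e2e settings (SECONDS_PER_ETH1_BLOCK is 2, per the context above): the follow distance is how many EL blocks the deposit-reading logic trails the EL head, so the change cuts the wait from roughly 16 s to 2 s, which matters when the EL has produced only a handful of blocks at startup.

```go
package main

import "fmt"

func main() {
	const secondsPerEth1Block = 2 // e2e override set earlier in this file
	for _, followDistance := range []int{8, 1} {
		fmt.Printf("ETH1_FOLLOW_DISTANCE=%d keeps deposits ~%ds behind the EL head\n",
			followDistance, followDistance*secondsPerEth1Block)
	}
}
```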

View File

@@ -17,7 +17,9 @@ const (
func E2ETestConfig() *BeaconChainConfig {
e2eConfig := MinimalSpecConfig()
e2eConfig.DepositContractAddress = "0x4242424242424242424242424242424242424242"
e2eConfig.Eth1FollowDistance = 8
// Post-merge, the follow distance is less critical since finality is guaranteed by the beacon chain.
// Setting to 1 avoids timing issues during e2e startup when the EL has few blocks.
e2eConfig.Eth1FollowDistance = 1
// Misc.
e2eConfig.MinGenesisActiveValidatorCount = 256
@@ -73,7 +75,9 @@ func E2ETestConfig() *BeaconChainConfig {
func E2EMainnetTestConfig() *BeaconChainConfig {
e2eConfig := MainnetConfig()
e2eConfig.DepositContractAddress = "0x4242424242424242424242424242424242424242"
e2eConfig.Eth1FollowDistance = 8
// Post-merge, the follow distance is less critical since finality is guaranteed by the beacon chain.
// Setting to 1 avoids timing issues during e2e startup when the EL has few blocks.
e2eConfig.Eth1FollowDistance = 1
// Misc.
e2eConfig.MinGenesisActiveValidatorCount = 256