Compare commits

...

4 Commits

Author SHA1 Message Date
terence
9dc227213c Add --ignore-unviable-attestations and deprecate --disable-last-epoch-targets (#16094)
This PR introduces the flag `--ignore-unviable-attestations` (replacing and
deprecating `--disable-last-epoch-targets`) to drop attestations whose
target state is not viable; by default such attestations are still processed
unless the flag is explicitly enabled.
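
A minimal sketch of how such a flag gates the viability check, loosely following the `IsViableForCheckpoint` hunk further down in this compare; the types, the `slotsPerEpoch` value, and the simplified branch bodies are illustrative assumptions, not Prysm code:

```go
// Sketch only (not Prysm source): how --ignore-unviable-attestations is meant
// to gate checkpoint viability, loosely following the IsViableForCheckpoint
// hunk later in this compare. Types and constants are simplified stand-ins.
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed for the example

type checkpoint struct {
	Epoch uint64
}

type node struct {
	slot uint64 // slot of the attestation's target block
}

func epochOf(slot uint64) uint64 { return slot / slotsPerEpoch }

// isViableForCheckpoint: with the flag enabled, only a target block sitting
// exactly on the checkpoint's epoch boundary is viable; with it disabled (the
// default), blocks from the previous epoch are still accepted and the
// attestation keeps being processed.
func isViableForCheckpoint(n *node, cp *checkpoint, ignoreUnviable bool) bool {
	epochStart := cp.Epoch * slotsPerEpoch
	if n.slot == epochStart {
		return true
	}
	if !ignoreUnviable && epochOf(n.slot)+1 == cp.Epoch {
		return true // the real code performs extra checks here; simplified
	}
	return false
}

func main() {
	target := &node{slot: 63} // last slot of epoch 1
	cp := &checkpoint{Epoch: 2}
	fmt.Println(isViableForCheckpoint(target, cp, false)) // true: default behaviour
	fmt.Println(isViableForCheckpoint(target, cp, true))  // false: attestation dropped
}
```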

(cherry picked from commit 7a70abbd15)
2025-12-05 09:51:04 -06:00
Potuz
a5dbe0f287 Use head state in more cases (#16095)
In some cases the head state is guaranteed to have the same shuffling and
active indices as the checkpoint state: whenever the head's dependent root
for the previous epoch coincides with the target checkpoint's.
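
A minimal sketch of that comparison, mirroring the `getRecentPreState` hunk below; `DependentRootFetcher`, `canUseHeadState`, and `mapFetcher` are hypothetical names introduced only for this example:

```go
// Sketch only: the real check lives in getRecentPreState (see the blockchain
// diff below). DependentRootFetcher, canUseHeadState and mapFetcher are
// hypothetical names used just for illustration.
package main

import "fmt"

type DependentRootFetcher interface {
	DependentRootForEpoch(root [32]byte, epoch uint64) ([32]byte, error)
}

// canUseHeadState reports whether the head state can stand in for the
// checkpoint's pre-state: both chains must share the dependent root of the
// epoch before the checkpoint, which fixes the shuffling and active indices.
func canUseHeadState(fc DependentRootFetcher, headRoot, cpRoot [32]byte, cpEpoch uint64) (bool, error) {
	if cpEpoch == 0 {
		return false, nil // epoch 0 has no prior dependent root
	}
	headDependent, err := fc.DependentRootForEpoch(headRoot, cpEpoch-1)
	if err != nil {
		return false, err
	}
	targetDependent, err := fc.DependentRootForEpoch(cpRoot, cpEpoch-1)
	if err != nil {
		return false, err
	}
	return headDependent == targetDependent, nil
}

// mapFetcher is a toy fetcher that ignores the epoch argument.
type mapFetcher map[[32]byte][32]byte

func (m mapFetcher) DependentRootForEpoch(root [32]byte, _ uint64) ([32]byte, error) {
	return m[root], nil
}

func main() {
	head, cpRoot, dep := [32]byte{'H'}, [32]byte{'C'}, [32]byte{'D'}
	fc := mapFetcher{head: dep, cpRoot: dep}
	ok, _ := canUseHeadState(fc, head, cpRoot, 2)
	fmt.Println(ok) // true: same dependent root, so the head state is usable
}
```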

(cherry picked from commit a2b84c9320)
2025-12-05 09:50:46 -06:00
Potuz
89391e3102 Dependent root instead of target (#15996)
* Add DependentRootForEpoch forkchoice helper

* Use dependent root in helpers to get head

(cherry picked from commit 6735c921f8)
2025-12-05 09:50:38 -06:00
Manu NALEPA
f72de36033 Move the "Not enough connected peers" (for a given subnet) from WARN to DEBUG (#16087)
**What type of PR is this?**
Other

**What does this PR do? Why is it needed?**
Move the "Not enough connected peers" (for a given subnet) from WARN to
DEBUG

**Rationale:**
The "Not enough connected peers" log (screenshot: https://github.com/user-attachments/assets/44dbdc8d-3e37-42ee-967b-75a7a1fbcafb) is potentially printed every 5 minutes.
Every 5 minutes, the BN checks whether, for a given subnet, the actual peer
count is at least the required minimum.
If not, this log is printed.

When connected validators are selected as aggregators for the next epoch,
the BN needs to subscribe to the corresponding attestation subnet and find
new peers for it.
If the "5 min ticker" ticks right after the BN has subscribed (but before it
has had time to find peers), this warning is displayed even though the slot
for which the validator is selected as an aggregator may still be minutes
away.

For this reason, this log is moved from WARN to DEBUG.
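
A minimal sketch of the resulting behaviour; the ticker interval comes from the rationale above, while the topic, threshold, and peer-count lookup are placeholders rather than Prysm APIs. Only the log fields and the Debug level come from the diff below.

```go
// Sketch only: illustrates the log-level change shown in the diff below.
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

// currentPeerCount stands in for the beacon node's per-subnet peer lookup.
func currentPeerCount(topic string) int { return 0 }

func main() {
	log := logrus.New()
	log.SetLevel(logrus.DebugLevel)

	const minPeers = 6              // placeholder minimum
	topic := "beacon_attestation_3" // placeholder subnet topic

	ticker := time.NewTicker(5 * time.Minute) // the "5 min ticker" from the rationale
	defer ticker.Stop()
	for range ticker.C {
		if count := currentPeerCount(topic); count < minPeers {
			// Previously Warning; now Debug, so a freshly subscribed subnet does
			// not alarm operators before peer discovery has had time to run.
			log.WithFields(logrus.Fields{
				"topic":  topic,
				"actual": count,
			}).Debug("Not enough connected peers")
		}
	}
}
```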

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description to this PR with sufficient context for
reviewers to understand this PR.

(cherry picked from commit fa056c2d21)
2025-12-05 09:42:45 -06:00
21 changed files with 308 additions and 38 deletions

View File

@@ -79,6 +79,7 @@ type HeadFetcher interface {
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []primitives.Slot)
DependentRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
@@ -470,6 +471,13 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return !isCanonical, nil
}
// DependentRootForEpoch wraps the corresponding method in forkchoice
func (s *Service) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.DependentRootForEpoch(root, epoch)
}
// TargetRootForEpoch wraps the corresponding method in forkchoice
func (s *Service) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"strconv"
@@ -23,10 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -34,11 +30,15 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headTarget, err := s.cfg.ForkChoiceStore.TargetRootForEpoch([32]byte(headRoot), c.Epoch)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
if !bytes.Equal(c.Root, headTarget[:]) {
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}
if targetDependent != headDependent {
return nil
}
@@ -50,7 +50,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
return st
}
// Otherwise we need to advance the head state to the start of the target epoch.
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {

View File

@@ -181,6 +181,123 @@ func TestService_GetRecentPreState(t *testing.T) {
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 30 <-- 31 <-- 32 <--- 64
// \---------33
// With different dependent roots at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()

View File

@@ -758,6 +758,11 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
}
// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -240,7 +240,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
if node.slot == epochStart {
return true, nil
}
if !features.Get().DisableLastEpochTargets {
if !features.Get().IgnoreUnviableAttestations {
// Allow any node from the checkpoint epoch - 1 to be viable.
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch+1 == cp.Epoch {
@@ -626,21 +626,26 @@ func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
// DependentRoot returns the last root of the epoch prior to the requested epoch in the canonical chain.
func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
tr, err := f.TargetRootForEpoch(f.CachedHeadRoot(), epoch)
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
}
// DependentRootForEpoch returns the last root of the epoch prior to the requested epoch for the given root.
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
tr, err := f.TargetRootForEpoch(root, epoch)
if err != nil {
return [32]byte{}, err
}
if tr == [32]byte{} {
return [32]byte{}, nil
}
n, ok := f.store.nodeByRoot[tr]
if !ok || n == nil {
node, ok := f.store.nodeByRoot[tr]
if !ok || node == nil {
return [32]byte{}, ErrNilNode
}
if slots.ToEpoch(n.slot) == epoch && n.parent != nil {
n = n.parent
if slots.ToEpoch(node.slot) >= epoch && node.parent != nil {
node = node.parent
}
return n.root, nil
return node.root, nil
}
// TargetRootForEpoch returns the root of the target block for a given epoch.

View File

@@ -608,6 +608,96 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.Equal(t, blk4.Root(), target)
}
func TestStore_DependentRootForEpoch(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
// Build the following tree structure:
// /------------37
// 0<--31<---32 <---33 <--- 35 <-------- 65 <--- 66
// \-- 36 ------------- 38
// Insert block at slot 31 (epoch 0)
state, blk31, err := prepareForkchoiceState(ctx, 31, [32]byte{31}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk31))
// Insert block at slot 32 (epoch 1)
state, blk32, err := prepareForkchoiceState(ctx, 32, [32]byte{32}, blk31.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk32))
// Insert block at slot 33 (epoch 1)
state, blk33, err := prepareForkchoiceState(ctx, 33, [32]byte{33}, blk32.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk33))
// Insert block at slot 35 (epoch 1)
state, blk35, err := prepareForkchoiceState(ctx, 35, [32]byte{35}, blk33.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk35))
// Insert fork: block at slot 36 (epoch 1) descending from block 32
state, blk36, err := prepareForkchoiceState(ctx, 36, [32]byte{36}, blk32.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk36))
// Insert block at slot 37 (epoch 1) descending from block 33
state, blk37, err := prepareForkchoiceState(ctx, 37, [32]byte{37}, blk33.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk37))
// Insert block at slot 38 (epoch 1) descending from block 36
state, blk38, err := prepareForkchoiceState(ctx, 38, [32]byte{38}, blk36.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk38))
// Insert block at slot 65 (epoch 2) descending from block 35
state, blk65, err := prepareForkchoiceState(ctx, 65, [32]byte{65}, blk35.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk65))
// Insert block at slot 66 (epoch 2) descending from block 65
state, blk66, err := prepareForkchoiceState(ctx, 66, [32]byte{66}, blk65.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk66))
// Test dependent root for block 32 at epoch 1 - should be block 31
dependent, err := f.DependentRootForEpoch(blk32.Root(), 1)
require.NoError(t, err)
require.Equal(t, blk31.Root(), dependent)
// Test dependent root for block 32 at epoch 2 - should be block 32
dependent, err = f.DependentRootForEpoch(blk32.Root(), 2)
require.NoError(t, err)
require.Equal(t, blk32.Root(), dependent)
// Test dependent root for block 33 at epoch 1 - should be block 31
dependent, err = f.DependentRootForEpoch(blk33.Root(), 1)
require.NoError(t, err)
require.Equal(t, blk31.Root(), dependent)
// Test dependent root for block 38 at epoch 1 - should be block 31
dependent, err = f.DependentRootForEpoch(blk38.Root(), 1)
require.NoError(t, err)
require.Equal(t, blk31.Root(), dependent)
// Test dependent root for block 36 at epoch 2 - should be block 36
dependent, err = f.DependentRootForEpoch(blk36.Root(), 2)
require.NoError(t, err)
require.Equal(t, blk36.Root(), dependent)
// Test dependent root for block 66 at epoch 1 - should be block 31
dependent, err = f.DependentRootForEpoch(blk66.Root(), 1)
require.NoError(t, err)
require.Equal(t, blk31.Root(), dependent)
// Test dependent root for block 66 at epoch 2 - should be block 35
dependent, err = f.DependentRootForEpoch(blk66.Root(), 2)
require.NoError(t, err)
require.Equal(t, blk35.Root(), dependent)
}
func TestStore_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()

View File

@@ -81,6 +81,7 @@ type FastGetter interface {
ShouldOverrideFCU() bool
Slot([32]byte) (primitives.Slot, error)
DependentRoot(primitives.Epoch) ([32]byte, error)
DependentRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
Weight(root [32]byte) (uint64, error)

View File

@@ -177,6 +177,13 @@ func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error)
return ro.getter.DependentRoot(epoch)
}
// DependentRootForEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.DependentRootForEpoch(root, epoch)
}
// TargetRootForEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
ro.l.RLock()

View File

@@ -40,6 +40,7 @@ const (
targetRootForEpochCalled
parentRootCalled
dependentRootCalled
dependentRootForEpochCalled
)
func _discard(t *testing.T, e error) {
@@ -305,6 +306,12 @@ func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error)
return [32]byte{}, nil
}
// DependentRootForEpoch implements FastGetter.
func (ro *mockROForkchoice) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
ro.calls = append(ro.calls, dependentRootForEpochCalled)
return [32]byte{}, nil
}
// TargetRootForEpoch implements FastGetter.
func (ro *mockROForkchoice) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
ro.calls = append(ro.calls, targetRootForEpochCalled)

View File

@@ -638,7 +638,7 @@ func (s *Service) logMinimumPeersPerSubnet(ctx context.Context, p subscribeParam
log.WithFields(logrus.Fields{
"topic": topic,
"actual": count,
}).Warning("Not enough connected peers")
}).Debug("Not enough connected peers")
}
}
if !isSubnetWithMissingPeers {

View File

@@ -337,17 +337,17 @@ func (s *Service) blockVerifyingState(ctx context.Context, blk interfaces.ReadOn
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, blockSlot)
}
// If head and block are in the same epoch and head is compatible with the parent's target, then use head
// If head and block are in the same epoch and head is compatible with the parent's dependent root, then use head
if blockEpoch == headEpoch {
headTarget, err := s.cfg.chain.TargetRootForEpoch([32]byte(headRoot), blockEpoch)
headDependent, err := s.cfg.chain.DependentRootForEpoch([32]byte(headRoot), blockEpoch)
if err != nil {
return nil, err
}
parentTarget, err := s.cfg.chain.TargetRootForEpoch([32]byte(parentRoot), blockEpoch)
parentDependent, err := s.cfg.chain.DependentRootForEpoch([32]byte(parentRoot), blockEpoch)
if err != nil {
return nil, err
}
if bytes.Equal(headTarget[:], parentTarget[:]) {
if bytes.Equal(headDependent[:], parentDependent[:]) {
return s.cfg.chain.HeadStateReadOnly(ctx)
}
}

View File

@@ -548,11 +548,12 @@ func TestRequirementSatisfaction(t *testing.T) {
}
type mockForkchoicer struct {
FinalizedCheckpointCB func() *forkchoicetypes.Checkpoint
HasNodeCB func([32]byte) bool
IsCanonicalCB func(root [32]byte) bool
SlotCB func([32]byte) (primitives.Slot, error)
TargetRootForEpochCB func([32]byte, primitives.Epoch) ([32]byte, error)
FinalizedCheckpointCB func() *forkchoicetypes.Checkpoint
HasNodeCB func([32]byte) bool
IsCanonicalCB func(root [32]byte) bool
SlotCB func([32]byte) (primitives.Slot, error)
DependentRootForEpochCB func([32]byte, primitives.Epoch) ([32]byte, error)
TargetRootForEpochCB func([32]byte, primitives.Epoch) ([32]byte, error)
}
var _ Forkchoicer = &mockForkchoicer{}
@@ -573,6 +574,10 @@ func (m *mockForkchoicer) Slot(root [32]byte) (primitives.Slot, error) {
return m.SlotCB(root)
}
func (m *mockForkchoicer) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return m.DependentRootForEpochCB(root, epoch)
}
func (m *mockForkchoicer) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
return m.TargetRootForEpochCB(root, epoch)
}

View File

@@ -319,17 +319,17 @@ func (dv *RODataColumnsVerifier) getVerifyingState(ctx context.Context, dataColu
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, dataColumnSlot)
}
// If head and data column are in the same epoch and head is compatible with the parent's target, then use head
// If head and data column are in the same epoch and head is compatible with the parent's dependent root, then use head
if dataColumnEpoch == headEpoch {
headTarget, err := dv.fc.TargetRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch)
headDependent, err := dv.fc.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), dataColumnEpoch)
if err != nil {
return nil, err
}
parentTarget, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
parentDependent, err := dv.fc.DependentRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return nil, err
}
if bytes.Equal(headTarget[:], parentTarget[:]) {
if bytes.Equal(headDependent[:], parentDependent[:]) {
return dv.hsp.HeadStateReadOnly(ctx)
}
}

View File

@@ -25,6 +25,7 @@ type Forkchoicer interface {
HasNode([32]byte) bool
IsCanonical(root [32]byte) bool
Slot([32]byte) (primitives.Slot, error)
DependentRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
}

View File

@@ -0,0 +1,3 @@
### Changed
- Move the "Not enough connected peers" (for a given subnet) from WARN to DEBUG

View File

@@ -0,0 +1,3 @@
### Changed
- Use dependent root instead of target when possible.

View File

@@ -0,0 +1,3 @@
### Fixed
- Use head state to validate attestations for old blocks if they are compatible.

View File

@@ -0,0 +1,2 @@
### Changed
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; default remains to process them unless explicitly enabled.

View File

@@ -69,7 +69,7 @@ type Flags struct {
DisableResourceManager bool // Disables running the node with libp2p's resource manager.
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
DisableLastEpochTargets bool // Disables processing of states for attestations to old blocks.
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
@@ -279,9 +279,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(blacklistRoots)
cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
}
if ctx.IsSet(disableLastEpochTargets.Name) {
logEnabled(disableLastEpochTargets)
cfg.DisableLastEpochTargets = true
cfg.IgnoreUnviableAttestations = false
if ctx.IsSet(ignoreUnviableAttestations.Name) && ctx.Bool(ignoreUnviableAttestations.Name) {
logEnabled(ignoreUnviableAttestations)
cfg.IgnoreUnviableAttestations = true
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}

View File

@@ -25,4 +25,6 @@ var upcomingDeprecation = []cli.Flag{
// deprecatedBeaconFlags contains flags that are still used by other components
// and therefore cannot be added to deprecatedFlags
var deprecatedBeaconFlags = []cli.Flag{}
var deprecatedBeaconFlags = []cli.Flag{
deprecatedDisableLastEpochTargets,
}

View File

@@ -197,10 +197,15 @@ var (
Usage: "(Work in progress): Enables the web portal for the validator client.",
Value: false,
}
// disableLastEpochTargets is a flag to disable processing of attestations for old blocks.
disableLastEpochTargets = &cli.BoolFlag{
// deprecatedDisableLastEpochTargets is a flag to disable processing of attestations for old blocks.
deprecatedDisableLastEpochTargets = &cli.BoolFlag{
Name: "disable-last-epoch-targets",
Usage: "Disables processing of last epoch targets.",
Usage: "Deprecated: disables processing of last epoch targets.",
}
// ignoreUnviableAttestations flag to skip attestations whose target state is not viable with respect to head (from lagging nodes).
ignoreUnviableAttestations = &cli.BoolFlag{
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
)
@@ -247,6 +252,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
disableStakinContractCheck,
SaveFullExecutionPayloads,
enableStartupOptimistic,
ignoreUnviableAttestations,
enableFullSSZDataLogging,
disableVerboseSigVerification,
prepareAllPayloads,
@@ -262,7 +268,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
disableLastEpochTargets,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {