Compare commits

...

27 Commits

Author SHA1 Message Date
Aditya Asgaonkar 599cb955c4 remove unnecessary method 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar f6cf7194c8 remove unnecessary type cast 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 2fe11b479b do not check head viability of safe head 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 0326d1afd0 shift safe head computation to float64 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 86685ac536 do not report safe head reorg if oldSafeHeadRoot is 0 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 4978a0d9e2 fix safe-head-fcu feature flag 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 8ac914ed82 add flag for safe head in FCU 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar a1c39d3a7f compute safe head only in first interval of slot 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 03018936b8 clean up & comments 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar fa4ef643ae check safe head for all block lower than currentSlot 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 64230ce27a use currentSlot-1 as safe head latest slot 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 932b545e3d renaming for consistency 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar d48ab52451 compute safe head based on voteOnlyWeight 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar d53c6f08c1 compute safeHeadLatestSlot depending on time into currentSlot 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar ad4e808e81 update safe head tests 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar db04052b1d add test for getMaxPossibleSupport 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar ce00da8f9f better error handling & implement GetSafeHeadPayloadHash 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 85c5e78d06 fix safe head root & metrics logic 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 9afbe6c850 bazel run gazelle 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 232f29ff58 small updates 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 58d9f2e89a add safeHeadPayloadBlockHash mock 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar 632a7ea3e4 update max possible support computation 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar a7070331f4 update safe head & metrics 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar b733d92cd6 update the tracked safe head 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar cce3b905c5 update FCU calls to use safe head 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar ea16efe71f basic testcases and minor logic updates 2024-04-26 10:56:37 -07:00
Aditya Asgaonkar f29894e654 Basic confirmation rule logic 2024-04-26 10:56:37 -07:00
22 changed files with 480 additions and 20 deletions

View File

@@ -31,6 +31,7 @@ type GetForkChoiceDumpResponse struct {
type ForkChoiceDumpExtraData struct {
UnrealizedJustifiedCheckpoint *Checkpoint `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *Checkpoint `json:"unrealized_finalized_checkpoint"`
SafeHeadRoot string `json:"safe_head_root"`
ProposerBoostRoot string `json:"proposer_boost_root"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root"`
HeadRoot string `json:"head_root"`

View File

@@ -41,6 +41,7 @@ type ForkchoiceFetcher interface {
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SafeHeadPayloadBlockHash() [32]byte
SetForkChoiceGenesisTime(uint64)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot

View File

@@ -93,6 +93,13 @@ func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
// SafeHeadPayloadBlockHash returns safe head payload block hash from forkchoice.
func (s *Service) SafeHeadPayloadBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.SafeHeadPayloadBlockHash()
}
// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()

View File

@@ -63,10 +63,15 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
safeBlockHash := [32]byte{}
if features.Get().SafeHeadFCU {
safeBlockHash = s.cfg.ForkChoiceStore.SafeHeadPayloadBlockHash()
} else {
safeBlockHash = s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
SafeBlockHash: safeBlockHash[:],
FinalizedBlockHash: finalizedHash[:],
}
if arg.attributes == nil {

View File

@@ -544,6 +544,11 @@ func (s *ChainService) CachedHeadRoot() [32]byte {
return [32]byte{}
}
// SafeHeadPayloadBlockHash mocks the same method in the chain service
func (s *ChainService) SafeHeadPayloadBlockHash() [32]byte {
return [32]byte{}
}
// GetProposerHead mocks the same method in the chain service
func (s *ChainService) GetProposerHead() [32]byte {
if s.ForkChoiceStore != nil {

View File

@@ -33,6 +33,7 @@ go_library(
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",

View File

@@ -2,6 +2,7 @@ package doublylinkedtree
import (
"context"
goErrors "errors"
"fmt"
"time"
@@ -15,6 +16,7 @@ import (
forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
prysmMath "github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -30,6 +32,7 @@ func New() *ForkChoice {
unrealizedFinalizedCheckpoint: &forkchoicetypes.Checkpoint{},
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
safeHeadRoot: [32]byte{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
@@ -65,17 +68,97 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
if err := f.store.treeRootNode.applyWeightChanges(ctx, f.store.proposerBoostRoot, f.store.previousProposerBoostScore); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
currentSlot := slots.CurrentSlot(f.store.genesisTime)
secondsSinceSlotStart, err := slots.SecondsSinceSlotStart(currentSlot, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
secondsSinceSlotStart = 0
}
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentSlot, secondsSinceSlotStart, f.store.committeeWeight); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)
safeHeadUpdateErr := f.UpdateSafeHead(ctx)
head, err := f.store.head(ctx)
return head, goErrors.Join(err, safeHeadUpdateErr)
}
// UpdateSafeHead updates the safe head in the fork choice store.
func (f *ForkChoice) UpdateSafeHead(
ctx context.Context,
) error {
oldSafeHeadRoot := f.store.safeHeadRoot
newSafeHeadRoot, err := f.store.safeHead(ctx)
if err != nil {
return errors.WithMessage(err, "could not update safe head")
}
// The safe head root has changed.
if oldSafeHeadRoot != newSafeHeadRoot {
newSafeHeadNode, ok := f.store.nodeByRoot[newSafeHeadRoot]
if !ok || newSafeHeadNode == nil {
return ErrNilNode
}
newSafeHeadSlot := newSafeHeadNode.slot
currentSlot := slots.CurrentSlot(f.store.genesisTime)
secondsSinceSlotStart, err := slots.SecondsSinceSlotStart(currentSlot, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
}
log.WithFields(logrus.Fields{
"currentSlot": fmt.Sprintf("%d", currentSlot),
"sinceSlotStartTime": fmt.Sprintf("%d", secondsSinceSlotStart),
"newSafeHeadSlot": fmt.Sprintf("%d", newSafeHeadSlot),
"newSafeHeadRoot": fmt.Sprintf("%#x", newSafeHeadRoot),
"oldSafeHeadRoot": fmt.Sprintf("%#x", oldSafeHeadRoot),
}).Info("Safe head has changed")
// Update metrics.
safeHeadChangesCount.Inc()
safeHeadSlotNumber.Set(float64(newSafeHeadSlot))
// Check if the safe head reorged.
commonRoot, forkSlot, err := f.CommonAncestor(ctx, oldSafeHeadRoot, newSafeHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
commonRoot = params.BeaconConfig().ZeroHash
}
if oldSafeHeadRoot != [32]byte{} && commonRoot != oldSafeHeadRoot {
// The safe head has reorged.
// Calculate reorg metrics.
oldSafeHeadNode, ok := f.store.nodeByRoot[oldSafeHeadRoot]
if !ok || oldSafeHeadNode == nil {
return ErrNilNode
}
oldSafeHeadSlot := oldSafeHeadNode.slot
dis := oldSafeHeadSlot + newSafeHeadSlot - 2*forkSlot
dep := prysmMath.Max(uint64(oldSafeHeadSlot-forkSlot), uint64(newSafeHeadSlot-forkSlot))
log.WithFields(logrus.Fields{
"currentSlot": fmt.Sprintf("%d", currentSlot),
"sinceSlotStartTime": fmt.Sprintf("%d", secondsSinceSlotStart),
"newSafeHeadSlot": fmt.Sprintf("%d", newSafeHeadSlot),
"newSafeHeadRoot": fmt.Sprintf("%#x", newSafeHeadRoot),
"oldSafeHeadSlot": fmt.Sprintf("%d", oldSafeHeadSlot),
"oldSafeHeadRoot": fmt.Sprintf("%#x", oldSafeHeadRoot),
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": dis,
"depth": dep,
}).Debug("Safe head reorg occurred")
// Update reorg metrics.
safeHeadReorgDistance.Observe(float64(dis))
safeHeadReorgDepth.Observe(float64(dep))
safeHeadReorgCount.Inc()
}
// Update safe head
f.store.safeHeadRoot = newSafeHeadRoot
}
return nil
}
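// Worked example (illustrative numbers, not from this change): if the old safe head sits
// at slot 20, the new safe head at slot 22 and their common ancestor at slot 18, the
// metrics above record distance = 20 + 22 - 2*18 = 6 and depth = max(20-18, 22-18) = 4.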
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
@@ -553,6 +636,17 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return node.payloadHash
}
// SafeHeadPayloadBlockHash returns the hash of the payload at the safe head
func (f *ForkChoice) SafeHeadPayloadBlockHash() [32]byte {
safeHeadRoot := f.store.safeHeadRoot
node, ok := f.store.nodeByRoot[safeHeadRoot]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
jc := &ethpb.Checkpoint{
@@ -586,6 +680,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
resp := &forkchoice2.Dump{
JustifiedCheckpoint: jc,
UnrealizedJustifiedCheckpoint: ujc,
SafeHeadRoot: f.store.safeHeadRoot[:],
FinalizedCheckpoint: fc,
UnrealizedFinalizedCheckpoint: ufc,
ProposerBoostRoot: f.store.proposerBoostRoot[:],

View File

@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// prepareForkchoiceState prepares a beacon State with the given data to mock
@@ -206,11 +207,11 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx, params.BeaconConfig().ZeroHash, 0))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 6, 0, f.store.committeeWeight))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
r1 := [32]byte{'1'}
@@ -861,3 +862,80 @@ func TestForkChoiceSlot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}
func TestForkChoiceSafeHead(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
balances := []uint64{32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) {
return balances, nil
}
require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{}))
require.Equal(t, uint64(len(balances)), f.numActiveValidators)
require.Equal(t, uint64(10), f.store.committeeWeight)
require.DeepEqual(t, balances, f.justifiedBalances)
proposerScoreBoost := params.BeaconConfig().ProposerScoreBoost
require.Equal(t, uint64(40), proposerScoreBoost)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
require.Equal(t, primitives.Slot(32), slotsPerEpoch)
driftGenesisTime(f, primitives.Slot(11), 0)
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
for i := 2; i < 10; i++ {
st, blkRoot, err = prepareForkchoiceState(ctx, primitives.Slot(i), indexToHash(uint64(i)), indexToHash(uint64(i-1)), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
}
tests := []struct {
name string
currentSlot primitives.Slot
nodeBalances []uint64
wantRoot [32]byte
}{
{
name: "safeHead is head-1",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 14},
wantRoot: indexToHash(9),
},
{
name: "safeHead is head-2",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
wantRoot: indexToHash(8),
},
{
name: "safeHead is head-3",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 0},
wantRoot: indexToHash(6),
},
{
name: "safeHead is justified",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
wantRoot: params.BeaconConfig().ZeroHash,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
driftGenesisTime(f, tc.currentSlot, 0)
require.Equal(t, tc.currentSlot, slots.CurrentSlot(f.store.genesisTime))
s := f.store
s.nodeByRoot[params.BeaconConfig().ZeroHash].balance = tc.nodeBalances[0]
for i := 1; i < 10; i++ {
s.nodeByRoot[indexToHash(uint64(i))].balance = tc.nodeBalances[i]
}
_, err = f.Head(ctx)
require.NoError(t, err)
safeHead := f.store.safeHeadRoot
require.Equal(t, tc.wantRoot, safeHead)
})
}
}
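// Illustrative check of the "safeHead is head-3" case above (committeeWeight 10, proposer
// boost weight 4, confirmation evaluated at slot 10): node 8 carries voteOnlyWeight 10
// against a threshold of (30+4)/2 = 17 and node 7 carries 20 against (40+4)/2 = 22, so
// neither is confirmed, while node 6 carries 30 against (50+4)/2 = 27 and becomes the
// safe head.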

View File

@@ -51,4 +51,34 @@ var (
Help: "The number of times pruning happened.",
},
)
safeHeadSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_safe_head_slot",
Help: "The slot number of the current safe head.",
},
)
safeHeadChangesCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_safe_head_changed_count",
Help: "The number of times safe head changes.",
},
)
safeHeadReorgCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "safe_head_reorgs_total",
Help: "Count the number of safe head reorgs",
})
safeHeadReorgDistance = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "safe_head_reorg_distance",
Help: "Captures distance of safe head reorgs. Distance is defined as the number of blocks between the old safe head and the new safe head",
Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
},
)
safeHeadReorgDepth = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "safe_head_reorg_depth",
Help: "Captures depth of safe head reorgs. Depth is defined as the number of blocks between the safe heads and the common ancestor",
Buckets: []float64{1, 2, 4, 8, 16, 32},
},
)
)

View File

@@ -3,6 +3,7 @@ package doublylinkedtree
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -17,33 +18,91 @@ const ProcessAttestationsThreshold = 10
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
func (n *Node) applyWeightChanges(ctx context.Context, proposerBoostRoot [32]byte, proposerBoostScore uint64) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
childrenVoteOnlyWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
if err := child.applyWeightChanges(ctx, proposerBoostRoot, proposerBoostScore); err != nil {
return err
}
childrenWeight += child.weight
childrenVoteOnlyWeight += child.voteOnlyWeight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
n.voteOnlyWeight = n.balance + childrenVoteOnlyWeight
if n.root == proposerBoostRoot {
if n.balance < proposerBoostScore {
return fmt.Errorf("invalid node weight %d is less than proposer boost score %d for root %#x", n.balance, proposerBoostScore, n.root)
}
n.voteOnlyWeight -= proposerBoostScore
}
return nil
}
// getMaxPossibleSupport computes the maximum possible voting weight for this node
func (n *Node) getMaxPossibleSupport(currentSlot primitives.Slot, committeeWeight uint64) float64 {
startSlot := n.slot
if n.parent != nil {
startSlot = n.parent.slot + 1
}
if startSlot > currentSlot {
return 0
}
startEpoch := slots.ToEpoch(startSlot)
currentEpoch := slots.ToEpoch(currentSlot)
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
// If the span of slots does not cover an epoch boundary, simply return the number of slots times committee weight.
if startEpoch == currentEpoch {
return float64(committeeWeight * uint64(currentSlot-startSlot+1))
}
// If the entire validator set is covered between startSlot and currentSlot,
// return slotsPerEpoch * committeeWeight.
if currentEpoch > startEpoch+1 ||
(currentEpoch == startEpoch+1 && uint64(startSlot)%slotsPerEpoch == 0) {
return float64(committeeWeight * slotsPerEpoch)
}
// The span of slots goes across an epoch boundary, but does not cover any full epoch.
// Do a pro-rata calculation of how many committees are contained.
slotsInStartEpoch := slotsPerEpoch - (uint64(startSlot) % slotsPerEpoch)
slotsInCurrentEpoch := (uint64(currentSlot) % slotsPerEpoch) + 1
slotsRemainingInCurrentEpoch := slotsPerEpoch - slotsInCurrentEpoch
weightFromCurrentEpoch := float64(committeeWeight * slotsInCurrentEpoch)
weightFromStartEpoch := float64(committeeWeight*slotsInStartEpoch*slotsRemainingInCurrentEpoch) / float64(slotsPerEpoch)
return weightFromCurrentEpoch + weightFromStartEpoch
}
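// Worked example (illustrative, matching the "epoch mid to epoch mid" test case in this
// change, with 32 slots per epoch): startSlot = 16, currentSlot = 48, committeeWeight = 100.
// slotsInStartEpoch = 32 - (16 % 32) = 16, slotsInCurrentEpoch = (48 % 32) + 1 = 17,
// slotsRemainingInCurrentEpoch = 32 - 17 = 15, so the maximum possible support is
// 100*17 + 100*16*15/32 = 1700 + 750 = 2450.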
// isOneConfirmed computes whether this node individually satisfies the LMD safety rule.
func (n *Node) isOneConfirmed(currentSlot primitives.Slot, committeeWeight uint64) bool {
if n.slot >= currentSlot {
return false
}
proposerBoostWeight := float64(committeeWeight*params.BeaconConfig().ProposerScoreBoost) / 100
maxPossibleSupport := n.getMaxPossibleSupport(currentSlot, committeeWeight)
safeThreshold := (maxPossibleSupport + proposerBoostWeight) / 2
return float64(n.voteOnlyWeight) > safeThreshold
}
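// Worked example (illustrative, mirroring the "safeHead is head-1" test case in this
// change, with committeeWeight 10 and a proposer score boost of 40%): proposerBoostWeight
// = 10*40/100 = 4, and a tip at slot 9 evaluated at currentSlot 10 has maxPossibleSupport
// = 10*(10-9+1) = 20, giving safeThreshold = (20+4)/2 = 12; a voteOnlyWeight of 14 clears
// it and the node counts as one-confirmed.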
// updateBestDescendant updates the best descendant of this node and its
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
func (n *Node) updateBestDescendant(ctx context.Context,
justifiedEpoch primitives.Epoch, finalizedEpoch primitives.Epoch,
currentSlot primitives.Slot, secondsSinceSlotStart uint64, committeeWeight uint64) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
n.bestConfirmedDescendant = nil
return nil
}
@@ -54,9 +113,12 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
if err := child.updateBestDescendant(ctx,
justifiedEpoch, finalizedEpoch,
currentSlot, secondsSinceSlotStart, committeeWeight); err != nil {
return err
}
currentEpoch := slots.ToEpoch(currentSlot)
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
@@ -78,13 +140,35 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
}
}
if hasViableDescendant {
// This node has a viable descendant.
if bestChild.bestDescendant == nil {
// The best descendant is the best child.
n.bestDescendant = bestChild
} else {
// The best descendant is more than 1 hop away.
n.bestDescendant = bestChild.bestDescendant
}
// Compute safe head only if we are in the first interval of the slot.
// This prevents attestations from the current slot from affecting the node's weight for the safe head computation.
if secondsSinceSlotStart < params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot {
// Attestations from the current slot are not accounted for in the fork choice - so compute safe head on the basis of the previous slot
if bestChild.isOneConfirmed(currentSlot-1, committeeWeight) {
// The best child is confirmed.
if bestChild.bestConfirmedDescendant == nil {
// The best child does not have confirmed descendants.
n.bestConfirmedDescendant = bestChild
} else {
// The best child has confirmed descendants.
n.bestConfirmedDescendant = bestChild.bestConfirmedDescendant
}
} else {
// The best child is not confirmed. There is no confirmed descendant.
n.bestConfirmedDescendant = nil
}
}
} else {
n.bestDescendant = nil
n.bestConfirmedDescendant = nil
}
return nil
}
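// Note (illustrative, assuming mainnet parameters SecondsPerSlot = 12 and IntervalsPerSlot
// = 3): the confirmed-descendant chain above is only recomputed during the first 4 seconds
// of a slot, before attestations for the current slot start influencing the weights.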

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
@@ -31,7 +32,7 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx, params.BeaconConfig().ZeroHash, 0))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
@@ -61,7 +62,7 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx, params.BeaconConfig().ZeroHash, 0))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
@@ -110,7 +111,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 2, 0, f.store.committeeWeight))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
@@ -130,7 +131,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 2, 0, f.store.committeeWeight))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
@@ -327,3 +328,76 @@ func TestNode_TimeStampsChecks(t *testing.T) {
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, false, late)
}
func TestNode_MaxPossibleSupport(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
require.Equal(t, primitives.Slot(32), slotsPerEpoch)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
for i := 2; i < 64; i++ {
state, blkRoot, err = prepareForkchoiceState(ctx, primitives.Slot(i), indexToHash(uint64(i)), indexToHash(uint64(i-1)), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
}
tests := []struct {
name string
nodeIndex uint64
currentSlot primitives.Slot
committeeWeight uint64
wantValue float64
}{
{
name: "2 slots in same epoch",
nodeIndex: 32,
currentSlot: 33,
committeeWeight: 1,
wantValue: 2,
},
{
name: "31 slots in same epoch",
nodeIndex: 32,
currentSlot: 63,
committeeWeight: 1,
wantValue: 32,
},
{
name: "one full epoch",
nodeIndex: 32,
currentSlot: 64,
committeeWeight: 100,
wantValue: 3200,
},
{
name: "more than one full epoch",
nodeIndex: 32,
currentSlot: 100,
committeeWeight: 100,
wantValue: 3200,
},
{
name: "epoch mid to epoch mid",
nodeIndex: 16,
currentSlot: 48,
committeeWeight: 100,
wantValue: 2450,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
driftGenesisTime(f, tc.currentSlot, 0)
require.Equal(t, tc.currentSlot, slots.CurrentSlot(f.store.genesisTime))
s := f.store
node := s.nodeByRoot[indexToHash(tc.nodeIndex)]
mps := node.getMaxPossibleSupport(tc.currentSlot, tc.committeeWeight)
require.Equal(t, tc.wantValue, mps)
})
}
}

View File

@@ -60,6 +60,39 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
return bestDescendant.root, nil
}
// safeHead starts from the justified root and then follows the best confirmed descendant links
// to find the safe head block.
func (s *Store) safeHead(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.safeHead")
defer span.End()
if err := ctx.Err(); err != nil {
return [32]byte{}, err
}
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
justifiedNode = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
}
// If the justified node doesn't have a best confirmed descendant,
// the best node is itself.
bestConfirmedDescendant := justifiedNode.bestConfirmedDescendant
if bestConfirmedDescendant == nil {
bestConfirmedDescendant = justifiedNode
}
return bestConfirmedDescendant.root, nil
}
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
@@ -127,7 +160,12 @@ func (s *Store) insert(ctx context.Context,
// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
secondsSinceSlotStart, err := slots.SecondsSinceSlotStart(currentSlot, s.genesisTime, uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
secondsSinceSlotStart = 0
}
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, currentSlot, secondsSinceSlotStart, s.committeeWeight); err != nil {
return n, err
}
}

View File

@@ -27,6 +27,7 @@ type Store struct {
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
safeHeadRoot [fieldparams.RootLength]byte // safe head root in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
@@ -58,9 +59,11 @@ type Node struct {
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
voteOnlyWeight uint64 // weight of this node from attestations only - no proposer boost score
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp uint64 // The timestamp when the node was inserted.
bestConfirmedDescendant *Node // bestConfirmedDescendant node of this node.
}
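// The two fields added above back the confirmation rule: voteOnlyWeight tracks
// attestation-only weight (the proposer boost score is subtracted in applyWeightChanges),
// while bestConfirmedDescendant caches the last node in the run of one-confirmed best
// children below this node, which store.safeHead follows from the justified node.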
// Vote defines an individual validator's vote.

View File

@@ -79,6 +79,7 @@ type FastGetter interface {
Slot([32]byte) (primitives.Slot, error)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
SafeHeadPayloadBlockHash() [32]byte
Weight(root [32]byte) (uint64, error)
}

View File

@@ -100,6 +100,13 @@ func (ro *ROForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return ro.getter.UnrealizedJustifiedPayloadBlockHash()
}
// SafeHeadPayloadBlockHash delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) SafeHeadPayloadBlockHash() [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.SafeHeadPayloadBlockHash()
}
// NodeCount delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) NodeCount() int {
ro.l.RLock()

View File

@@ -27,6 +27,7 @@ const (
previousJustifiedCheckpointCalled
justifiedPayloadBlockHashCalled
unrealizedJustifiedPayloadBlockHashCalled
safeHeadPayloadBlockHashCalled
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockDelayCalled
@@ -104,6 +105,11 @@ func TestROLocking(t *testing.T) {
call: unrealizedJustifiedPayloadBlockHashCalled,
cb: func(g FastGetter) { g.UnrealizedJustifiedPayloadBlockHash() },
},
{
name: "safeHeadPayloadBlockHashCalled",
call: safeHeadPayloadBlockHashCalled,
cb: func(g FastGetter) { g.SafeHeadPayloadBlockHash() },
},
{
name: "nodeCountCalled",
call: nodeCountCalled,
@@ -241,6 +247,11 @@ func (ro *mockROForkchoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}
func (ro *mockROForkchoice) SafeHeadPayloadBlockHash() [32]byte {
ro.calls = append(ro.calls, safeHeadPayloadBlockHashCalled)
return [32]byte{}
}
func (ro *mockROForkchoice) NodeCount() int {
ro.calls = append(ro.calls, nodeCountCalled)
return 0

View File

@@ -189,6 +189,7 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
ExtraData: &structs.ForkChoiceDumpExtraData{
UnrealizedJustifiedCheckpoint: structs.CheckpointFromConsensus(dump.UnrealizedJustifiedCheckpoint),
UnrealizedFinalizedCheckpoint: structs.CheckpointFromConsensus(dump.UnrealizedFinalizedCheckpoint),
SafeHeadRoot: hexutil.Encode(dump.SafeHeadRoot),
ProposerBoostRoot: hexutil.Encode(dump.ProposerBoostRoot),
PreviousProposerBoostRoot: hexutil.Encode(dump.PreviousProposerBoostRoot),
HeadRoot: hexutil.Encode(dump.HeadRoot),

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -108,16 +109,20 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
}
finalizedBlockHash := [32]byte{}
justifiedBlockHash := [32]byte{}
safeBlockHash := [32]byte{}
// Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
if st.Version() >= version.Bellatrix {
finalizedBlockHash = vs.FinalizationFetcher.FinalizedBlockHash()
justifiedBlockHash = vs.FinalizationFetcher.UnrealizedJustifiedPayloadBlockHash()
if features.Get().SafeHeadFCU {
safeBlockHash = vs.ForkchoiceFetcher.SafeHeadPayloadBlockHash()
} else {
safeBlockHash = vs.FinalizationFetcher.UnrealizedJustifiedPayloadBlockHash()
}
}
f := &enginev1.ForkchoiceState{
HeadBlockHash: parentHash,
SafeBlockHash: justifiedBlockHash[:],
SafeBlockHash: safeBlockHash[:],
FinalizedBlockHash: finalizedBlockHash[:],
}

View File

@@ -150,6 +150,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr, ExecutionPayload: &pb.ExecutionPayload{}, BuilderOverride: tt.override},
HeadFetcher: &chainMock.ChainService{State: tt.st},
FinalizationFetcher: &chainMock.ChainService{},
ForkchoiceFetcher: &chainMock.ChainService{},
BeaconDB: beaconDB,
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
@@ -248,6 +249,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
},
HeadFetcher: &chainMock.ChainService{State: transitionSt},
FinalizationFetcher: &chainMock.ChainService{},
ForkchoiceFetcher: &chainMock.ChainService{},
BeaconDB: beaconDB,
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),

View File

@@ -43,6 +43,7 @@ type Flags struct {
EnablePeerScorer bool // EnablePeerScorer enables experimental peer scoring in p2p.
EnableLightClient bool // EnableLightClient enables light client APIs.
EnableQUIC bool // EnableQUIC specifies whether to enable QUIC transport for libp2p.
SafeHeadFCU bool // SafeHeadFCU enables passing the safe head into FCU instead of the justified block.
WriteWalletPasswordOnWebOnboarding bool // WriteWalletPasswordOnWebOnboarding writes the password to disk after Prysm web signup.
EnableDoppelGanger bool // EnableDoppelGanger enables doppelganger protection on startup for the validator.
EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space
@@ -195,6 +196,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.DisableGRPCConnectionLogs = true
}
if ctx.Bool(safeHeadFCU.Name) {
log.WithField(safeHeadFCU.Name, safeHeadFCU.Usage).Warn(enabledFeatureFlag)
cfg.SafeHeadFCU = true
}
cfg.EnablePeerScorer = true
if ctx.Bool(disablePeerScorer.Name) {
logDisabled(disablePeerScorer)

View File

@@ -48,6 +48,10 @@ var (
Name: "disable-grpc-connection-logging",
Usage: "Disables displaying logs for newly connected grpc clients.",
}
safeHeadFCU = &cli.BoolFlag{
Name: "safe-head-fcu",
Usage: "Enables safe head block instead of justified block in FCU updates.",
}
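// Usage note: with the flag above registered, the safe-head behavior is opt-in and would
// be enabled by starting the beacon node with --safe-head-fcu; without it, FCU keeps
// sending the unrealized justified payload hash as the safe block hash.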
disablePeerScorer = &cli.BoolFlag{
Name: "disable-peer-scorer",
Usage: "(Danger): Disables P2P peer scorer. Do NOT use this in production!",
@@ -205,6 +209,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
HoleskyTestnet,
SepoliaTestnet,
Mainnet,
safeHeadFCU,
disablePeerScorer,
disableBroadcastSlashingFlag,
enableSlasherFlag,

View File

@@ -31,6 +31,7 @@ type Dump struct {
FinalizedCheckpoint *eth.Checkpoint
UnrealizedJustifiedCheckpoint *eth.Checkpoint
UnrealizedFinalizedCheckpoint *eth.Checkpoint
SafeHeadRoot []byte
ProposerBoostRoot []byte
PreviousProposerBoostRoot []byte
HeadRoot []byte