Compare commits

...

4 Commits

Author            SHA1        Message                                                           Date
terence tsao      89053cdb70  Only update safe head if the highest block slot is current slot   2025-07-21 14:28:46 -07:00
terence tsao      5a95b44107  Better flags                                                      2025-07-19 21:23:57 -07:00
terence tsao      ebe9ec6014  Potuz's first round feedback                                      2025-07-19 20:32:50 -07:00
Aditya Asgaonkar  3ce627a296  Implement fast confirmation                                       2025-07-19 20:32:08 -07:00
27 changed files with 898 additions and 18 deletions

View File

@@ -31,6 +31,7 @@ type GetForkChoiceDumpResponse struct {
type ForkChoiceDumpExtraData struct {
UnrealizedJustifiedCheckpoint *Checkpoint `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *Checkpoint `json:"unrealized_finalized_checkpoint"`
SafeHeadRoot string `json:"safe_head_root"`
ProposerBoostRoot string `json:"proposer_boost_root"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root"`
HeadRoot string `json:"head_root"`

View File

@@ -42,6 +42,7 @@ type ForkchoiceFetcher interface {
CachedHeadRoot() [32]byte
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(time.Time)
SafeBlockHash() [32]byte
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)

View File

@@ -91,6 +91,12 @@ func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
}
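// SafeBlockHash returns the safe payload block hash from forkchoice.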
func (s *Service) SafeBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.SafeBlockHash()
}
// FinalizedBlockHash returns finalized payload block hash from forkchoice.
func (s *Service) FinalizedBlockHash() [32]byte {
s.cfg.ForkChoiceStore.RLock()

View File

@@ -63,10 +63,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
safeBlockHash := s.cfg.ForkChoiceStore.SafeBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
SafeBlockHash: safeBlockHash[:],
FinalizedBlockHash: finalizedHash[:],
}
if len(fcs.HeadBlockHash) != 32 || [32]byte(fcs.HeadBlockHash) == [32]byte{} {

View File

@@ -632,6 +632,11 @@ func (s *ChainService) CachedHeadRoot() [32]byte {
return [32]byte{}
}
// SafeBlockHash mocks the same method in the chain service
func (s *ChainService) SafeBlockHash() [32]byte {
return [32]byte{}
}
// GetProposerHead mocks the same method in the chain service
func (s *ChainService) GetProposerHead() [32]byte {
if s.ForkChoiceStore != nil {

View File

@@ -29,6 +29,7 @@ func New() *ForkChoice {
unrealizedFinalizedCheckpoint: &forkchoicetypes.Checkpoint{},
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
safeHeadRoot: [32]byte{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
@@ -70,11 +71,115 @@ func (f *ForkChoice) Head(
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
currentSlot := slots.CurrentSlot(f.store.genesisTime)
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("Could not compute seconds since slot start")
}
if err := f.store.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: jc.Epoch,
finalizedEpoch: fc.Epoch,
currentSlot: currentSlot,
secondsSinceSlotStart: secondsSinceSlotStart,
committeeWeight: f.store.committeeWeight,
pbRoot: f.store.proposerBoostRoot,
pbValue: f.store.previousProposerBoostScore,
}); err != nil {
return [32]byte{}, errors.Wrap(err, "Could not update best descendant")
}
h, err := f.store.head(ctx)
if err != nil {
return [32]byte{}, errors.Wrap(err, "Could not get head")
}
// Return the head without updating the safe head if the highest received block is not from the current slot.
if f.store.highestReceivedNode.slot != slots.CurrentSlot(f.store.genesisTime) {
return h, nil
}
if err := f.updateSafeHead(ctx); err != nil {
log.WithError(err).Error("Could not update safe head")
}
return h, nil
}
// updateSafeHead updates the safe head in the fork choice store.
func (f *ForkChoice) updateSafeHead(
ctx context.Context,
) error {
oldSafeHeadRoot := f.store.safeHeadRoot
newSafeHeadRoot, err := f.store.safeHead(ctx)
if err != nil {
return errors.Wrap(err, "Could not get safe head")
}
// If the safe head has not changed, return early.
if oldSafeHeadRoot == newSafeHeadRoot {
return nil
}
// Update safe head
f.store.safeHeadRoot = newSafeHeadRoot
f.logSafeHead(ctx, newSafeHeadRoot, oldSafeHeadRoot)
return nil
}
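// logSafeHead logs the new safe head, updates safe head metrics, and reports a
// reorg if the old safe head is not an ancestor of the new safe head.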
func (f *ForkChoice) logSafeHead(ctx context.Context, newSafeHeadRoot [32]byte, oldSafeHeadRoot [32]byte) {
newSafeHeadNode, ok := f.store.nodeByRoot[newSafeHeadRoot]
if !ok || newSafeHeadNode == nil {
log.WithError(ErrNilNode).Error("Could not find new safe head node")
return
}
newSafeHeadSlot := newSafeHeadNode.slot
currentSlot := slots.CurrentSlot(f.store.genesisTime)
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("Could not compute seconds since slot start")
}
log.WithFields(logrus.Fields{
"currentSlot": fmt.Sprintf("%d", currentSlot),
"sinceSlotStartTime": fmt.Sprintf("%d", secondsSinceSlotStart.Milliseconds()),
"newSafeHeadSlot": fmt.Sprintf("%d", newSafeHeadSlot),
"newSafeHeadRoot": fmt.Sprintf("%#x", newSafeHeadRoot),
"weight": fmt.Sprintf("%d", newSafeHeadNode.weight),
}).Info("Safe head has changed")
// Update metrics.
safeHeadSlotNumber.Set(float64(newSafeHeadSlot))
// Check if the safe head reorged.
commonRoot, forkSlot, err := f.CommonAncestor(ctx, oldSafeHeadRoot, newSafeHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
return
}
// The safe head has reorged. This is bad!
if oldSafeHeadRoot != [32]byte{} && commonRoot != oldSafeHeadRoot {
oldSafeHeadNode, ok := f.store.nodeByRoot[oldSafeHeadRoot]
if !ok || oldSafeHeadNode == nil {
log.WithError(ErrNilNode).Error("Could not find old safe head node")
return
}
oldSafeHeadSlot := oldSafeHeadNode.slot
dis := oldSafeHeadSlot + newSafeHeadSlot - 2*forkSlot
dep := max(uint64(oldSafeHeadSlot-forkSlot), uint64(newSafeHeadSlot-forkSlot))
log.WithFields(logrus.Fields{
"oldSafeHeadSlot": fmt.Sprintf("%d", oldSafeHeadSlot),
"oldSafeHeadRoot": fmt.Sprintf("%#x", oldSafeHeadRoot),
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": dis,
"depth": dep,
}).Error("Safe head reorg occurred")
safeHeadReorgDistance.Observe(float64(dis))
safeHeadReorgDepth.Observe(float64(dep))
safeHeadReorgCount.Inc()
}
return f.store.head(ctx)
}
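For reference, the reorg metrics above reduce to simple slot arithmetic. A minimal sketch with hypothetical slot numbers (not taken from this change):

package main

import "fmt"

func main() {
	// Hypothetical example mirroring the computation in logSafeHead:
	// old safe head at slot 100, new safe head at slot 101, common ancestor (fork point) at slot 98.
	oldSafeHeadSlot, newSafeHeadSlot, forkSlot := uint64(100), uint64(101), uint64(98)
	distance := oldSafeHeadSlot + newSafeHeadSlot - 2*forkSlot       // 5: blocks between the old and new safe heads
	depth := max(oldSafeHeadSlot-forkSlot, newSafeHeadSlot-forkSlot) // 3: blocks back to the common ancestor
	fmt.Println(distance, depth)
}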
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
@@ -537,6 +642,23 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return node.payloadHash
}
// SafeBlockHash returns the payload block hash of the safe head, as selected by the configured safe block algorithm.
func (f *ForkChoice) SafeBlockHash() [32]byte {
switch params.BeaconConfig().SafeBlockAlgorithm {
case "justified":
return f.JustifiedPayloadBlockHash()
case "fast-confirmation":
safeHeadRoot := f.store.safeHeadRoot
node, ok := f.store.nodeByRoot[safeHeadRoot]
if !ok || node == nil {
return [32]byte{}
}
return node.payloadHash
default:
return f.UnrealizedJustifiedPayloadBlockHash()
}
}
// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
jc := &ethpb.Checkpoint{
@@ -570,6 +692,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
resp := &forkchoice2.Dump{
JustifiedCheckpoint: jc,
UnrealizedJustifiedCheckpoint: ujc,
SafeHeadRoot: f.store.safeHeadRoot[:],
FinalizedCheckpoint: fc,
UnrealizedFinalizedCheckpoint: ufc,
ProposerBoostRoot: f.store.proposerBoostRoot[:],

View File

@@ -19,6 +19,7 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
)
// prepareForkchoiceState prepares a beacon State with the given data to mock
@@ -229,7 +230,13 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: 1,
finalizedEpoch: 1,
currentSlot: 6,
secondsSinceSlotStart: 0,
committeeWeight: f.store.committeeWeight,
}))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
r1 := [32]byte{'1'}
@@ -923,3 +930,85 @@ func TestForkChoice_CleanupInserting(t *testing.T) {
require.NotNil(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, false, f.HasNode(roblock.Root()))
}
func TestForkChoiceSafeHead(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
balances := []uint64{32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) {
return balances, nil
}
require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{}))
require.Equal(t, uint64(len(balances)), f.numActiveValidators)
require.Equal(t, uint64(10), f.store.committeeWeight)
require.DeepEqual(t, balances, f.justifiedBalances)
proposerScoreBoost := params.BeaconConfig().ProposerScoreBoost
require.Equal(t, uint64(40), proposerScoreBoost)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
require.Equal(t, primitives.Slot(32), slotsPerEpoch)
driftGenesisTime(f, primitives.Slot(11), 0)
st, b, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, b))
for i := 2; i < 10; i++ {
st, b, err = prepareForkchoiceState(ctx, primitives.Slot(i), indexToHash(uint64(i)), indexToHash(uint64(i-1)), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, b))
}
// Add a node at slot 11 to ensure highest received node is at current slot
st, b, err = prepareForkchoiceState(ctx, primitives.Slot(11), indexToHash(uint64(11)), indexToHash(uint64(9)), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, b))
tests := []struct {
name string
currentSlot primitives.Slot
nodeBalances []uint64
wantRoot [32]byte
}{
{
name: "safeHead is head-1",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 14, 10},
wantRoot: indexToHash(9),
},
{
name: "safeHead is head-2",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
wantRoot: indexToHash(9),
},
{
name: "safeHead is head-3",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 10},
wantRoot: indexToHash(7),
},
{
name: "safeHead is justified",
currentSlot: primitives.Slot(11),
nodeBalances: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
wantRoot: params.BeaconConfig().ZeroHash,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
driftGenesisTime(f, tc.currentSlot, 0)
require.Equal(t, tc.currentSlot, slots.CurrentSlot(f.store.genesisTime))
s := f.store
s.nodeByRoot[params.BeaconConfig().ZeroHash].balance = tc.nodeBalances[0]
for i := 1; i < 10; i++ {
s.nodeByRoot[indexToHash(uint64(i))].balance = tc.nodeBalances[i]
}
s.nodeByRoot[indexToHash(uint64(11))].balance = tc.nodeBalances[10]
_, err = f.Head(ctx)
require.NoError(t, err)
safeHead := f.store.safeHeadRoot
require.Equal(t, tc.wantRoot, safeHead)
})
}
}

View File

@@ -51,4 +51,28 @@ var (
Help: "The number of times pruning happened.",
},
)
safeHeadSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "safe_head_slot",
Help: "The slot number of the current safe head.",
},
)
safeHeadReorgCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "safe_head_reorgs_total",
Help: "Count the number of safe head reorgs",
})
safeHeadReorgDistance = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "safe_head_reorg_distance",
Help: "Captures distance of safe head reorgs. Distance is defined as the number of blocks between the old safe head and the new safe head",
Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
},
)
safeHeadReorgDepth = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "safe_head_reorg_depth",
Help: "Captures depth of safe head reorgs. Depth is defined as the number of blocks between the safe heads and the common ancestor",
Buckets: []float64{1, 2, 4, 8, 16, 32},
},
)
)

View File

@@ -10,14 +10,27 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ProcessAttestationsThreshold is the amount of time after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10 * time.Second
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
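
// updateDescendantArgs bundles the fork choice context needed to recompute best descendants and best confirmed descendants.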
type updateDescendantArgs struct {
justifiedEpoch primitives.Epoch
finalizedEpoch primitives.Epoch
currentSlot primitives.Slot
secondsSinceSlotStart time.Duration
committeeWeight uint64
pbRoot [32]byte
pbValue uint64
}
// applyWeightChanges recursively traverses a tree of nodes to update each node's total weight and
// weight without proposer boost by summing the balance of the node and its children.
// If the node matches a specific root (`pbRoot`), it subtracts a given boost value (`pbValue`) from the weight without boost,
// ensuring the balance is sufficient. It also handles context cancellation and errors during recursion.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
@@ -37,14 +50,51 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
return nil
}
// maxWeight computes the maximum possible voting weight a node can accumulate from its start slot
// (the slot after its parent's slot) to the given end slot, scaled by committee weight.
// If the range is within one epoch, it returns the number of slots times the committee weight.
// If the range spans at least one full epoch, or starts at an epoch boundary and ends in the next epoch, it returns the full epoch weight.
// Otherwise, it prorates the weight based on the number of slots in the start and end epochs, accounting for partial epoch coverage.
func (n *Node) maxWeight(endSlot primitives.Slot, committeeWeight uint64) uint64 {
startSlot := n.slot
if n.parent != nil {
startSlot = n.parent.slot + 1
}
if startSlot > endSlot {
return 0
}
startEpoch := slots.ToEpoch(startSlot)
endEpoch := slots.ToEpoch(endSlot)
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
slotSpan := uint64(endSlot - startSlot + 1)
if startEpoch == endEpoch {
return committeeWeight * slotSpan
}
if endEpoch > startEpoch+1 || (endEpoch == startEpoch+1 && uint64(startSlot)%slotsPerEpoch == 0) {
return committeeWeight * slotsPerEpoch
}
slotsInStartEpoch := slotsPerEpoch - (uint64(startSlot) % slotsPerEpoch)
slotsInEndEpoch := (uint64(endSlot) % slotsPerEpoch) + 1
weightEnd := committeeWeight * slotsInEndEpoch
weightStart := (committeeWeight * slotsInStartEpoch * (slotsPerEpoch - slotsInEndEpoch)) / slotsPerEpoch
return weightEnd + weightStart
}
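To illustrate the cross-epoch proration, here is a small standalone sketch (assuming SLOTS_PER_EPOCH = 32; the numbers match the partial-overlap case in TestNode_maxWeight further down in this diff):

package main

import "fmt"

// prorate mirrors only the partial-epoch-overlap branch of Node.maxWeight (a sketch, not the full function).
func prorate(startSlot, endSlot, committeeWeight, slotsPerEpoch uint64) uint64 {
	slotsInStartEpoch := slotsPerEpoch - startSlot%slotsPerEpoch
	slotsInEndEpoch := endSlot%slotsPerEpoch + 1
	weightEnd := committeeWeight * slotsInEndEpoch
	weightStart := (committeeWeight * slotsInStartEpoch * (slotsPerEpoch - slotsInEndEpoch)) / slotsPerEpoch
	return weightEnd + weightStart
}

func main() {
	// startSlot 30, endSlot 33, committeeWeight 4: two slots in each epoch,
	// weightEnd = 8, weightStart = (4*2*30)/32 = 7 (integer division), total 15.
	fmt.Println(prorate(30, 33, 4, 32)) // 15
}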
// updateBestDescendant updates the best descendant of this node and its
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
func (n *Node) updateBestDescendant(ctx context.Context, args *updateDescendantArgs) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
n.bestConfirmedDescendant = nil
return nil
}
@@ -55,10 +105,11 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
if err := child.updateBestDescendant(ctx, args); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
currentEpoch := slots.ToEpoch(args.currentSlot)
childLeadsToViableHead := child.leadsToViableHead(args.justifiedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
@@ -79,13 +130,33 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
}
}
if hasViableDescendant {
// This node has a viable descendant.
if bestChild.bestDescendant == nil {
// The best descendant is the best child.
n.bestDescendant = bestChild
} else {
// The best descendant is more than 1 hop away.
n.bestDescendant = bestChild.bestDescendant
}
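// Only evaluate fast confirmation within the first interval of the slot (SecondsPerSlot / IntervalsPerSlot seconds, i.e. the first 4s with mainnet presets).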
if uint64(args.secondsSinceSlotStart.Seconds()) < params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot {
prevSlot := primitives.Slot(0)
if args.currentSlot > 1 {
prevSlot = args.currentSlot - 1
}
if bestChild.confirmed(prevSlot, args.committeeWeight, args.pbRoot, args.pbValue) {
n.bestConfirmedDescendant = bestChild.bestConfirmedDescendant
if n.bestConfirmedDescendant == nil {
n.bestConfirmedDescendant = bestChild
}
} else {
n.bestConfirmedDescendant = nil
}
}
} else {
n.bestDescendant = nil
n.bestConfirmedDescendant = nil
}
return nil
}
@@ -192,3 +263,38 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*
}
return nodes, nil
}
// confirmed returns true if the node satisfies the confirmation rule.
func (n *Node) confirmed(slot primitives.Slot, committeeWeight uint64, pbRoot [32]byte, pbValue uint64) bool {
if n.slot > slot {
return false
}
pbWeight := committeeWeight * params.BeaconConfig().ProposerScoreBoost / 100
maxWeight := n.maxWeight(slot, committeeWeight)
byzantineWeight := maxWeight * params.BeaconConfig().FastConfirmationByzantineThreshold / 100
threshold := (maxWeight+pbWeight)/2 + byzantineWeight
nodeWeight := n.weight
var pbWeightSubtracted bool
if n.root == pbRoot || (n.bestDescendant != nil && n.bestDescendant.root == pbRoot) {
if nodeWeight < pbValue {
return false
}
nodeWeight -= pbValue
pbWeightSubtracted = true
}
log.WithFields(logrus.Fields{
"slot": slot,
"nodeSlot": n.slot,
"committeeWeight": committeeWeight,
"maxWeight": maxWeight,
"nodeWeight": nodeWeight,
"threshold": threshold,
"pbWeightSubtracted": pbWeightSubtracted,
}).Info("Checking confirmation")
return nodeWeight > threshold
}
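As a rough sketch of the threshold arithmetic above, with the same illustrative numbers used by the tests later in this diff (committee weight 100 per slot, 40% proposer boost, 33% Byzantine threshold, and a node spanning slots 0 and 1):

package main

import "fmt"

func main() {
	// Illustrative values only, not the real configuration objects.
	committeeWeight := uint64(100)
	maxWeight := committeeWeight * 2        // 200: two slots of attesting weight
	pbWeight := committeeWeight * 40 / 100  // 40: proposer boost weight
	byzantineWeight := maxWeight * 33 / 100 // 66: tolerated Byzantine weight
	threshold := (maxWeight+pbWeight)/2 + byzantineWeight
	fmt.Println(threshold) // 186: a node's boost-adjusted weight must strictly exceed this to be confirmed
}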

View File

@@ -1,6 +1,7 @@
package doublylinkedtree
import (
"context"
"testing"
"time"
@@ -110,12 +111,60 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: 1,
finalizedEpoch: 1,
currentSlot: 2,
secondsSinceSlotStart: 0,
committeeWeight: f.store.committeeWeight,
}))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_BestConfirmedDescendant(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
// Insert first child node
state1, blk1, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state1, blk1))
// Insert second child node
state2, blk2, err := prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state2, blk2))
s := f.store
// Set node weights manually to control the confirmation logic
node1 := s.nodeByRoot[indexToHash(1)]
node2 := s.nodeByRoot[indexToHash(2)]
node1.weight = 100
node2.weight = 200
// Execute update
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: 1,
finalizedEpoch: 1,
currentSlot: 3,
secondsSinceSlotStart: 0,
committeeWeight: f.store.committeeWeight,
}))
require.NoError(t, err)
// Assert the correct bestConfirmedDescendant is selected
assert.NotNil(t, s.treeRootNode.bestConfirmedDescendant, "expected bestConfirmedDescendant to be set")
assert.Equal(t, node2, s.treeRootNode.bestConfirmedDescendant, "expected node2 to be the bestConfirmedDescendant")
// Additional: verify that the best descendant logic is consistent
assert.Equal(t, node2, s.treeRootNode.bestDescendant, "expected node2 to be the bestDescendant")
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
@@ -130,7 +179,13 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: 1,
finalizedEpoch: 1,
currentSlot: 2,
secondsSinceSlotStart: 0,
committeeWeight: f.store.committeeWeight,
}))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
@@ -327,3 +382,238 @@ func TestNode_TimeStampsChecks(t *testing.T) {
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, false, late)
}
func TestNode_maxWeight(t *testing.T) {
type fields struct {
slot primitives.Slot
parent *Node
}
type args struct {
endSlot primitives.Slot
committeeWeight uint64
}
tests := []struct {
name string
fields fields
args args
want uint64
}{
{
name: "startSlot > endSlot, should return 0",
fields: fields{
parent: &Node{slot: 9},
},
args: args{
endSlot: 9,
},
want: 0,
},
{
name: "startEpoch == currentEpoch",
fields: fields{
parent: &Node{slot: 4},
},
args: args{
endSlot: 7,
committeeWeight: 10,
},
want: 30, // (7 - 5 + 1) * 10 = 30
},
{
name: "currentEpoch > startEpoch + 1",
fields: fields{
slot: 0,
},
args: args{
endSlot: 32,
committeeWeight: 10,
},
want: 320, // slotsPerEpoch * committeeWeight
},
{
name: "currentEpoch == startEpoch+1 && startSlot % slotsPerEpoch == 0",
fields: fields{
parent: &Node{
slot: 31,
},
},
args: args{
endSlot: 64,
committeeWeight: 5,
},
want: 160, // slotsPerEpoch * committeeWeight
},
{
name: "partial overlap between epochs",
fields: fields{
slot: 30,
},
args: args{
endSlot: 33,
committeeWeight: 4,
},
want: func() uint64 {
startSlot := uint64(30)
currentSlot := uint64(33)
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
slotsInStartEpoch := slotsPerEpoch - (startSlot % slotsPerEpoch) // 32 - 30 = 2
slotsInCurrentEpoch := (currentSlot % slotsPerEpoch) + 1 // 33 % 32 + 1 = 2
weightStart := (4 * slotsInStartEpoch * (slotsPerEpoch - slotsInCurrentEpoch)) / slotsPerEpoch // 4 * 2 * 30 / 32 = 7 (int division)
weightCurrent := 4 * slotsInCurrentEpoch // 4 * 2 = 8
return weightStart + weightCurrent // 7 + 8 = 15
}(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &Node{
slot: tt.fields.slot,
parent: tt.fields.parent,
}
if got := n.maxWeight(tt.args.endSlot, tt.args.committeeWeight); got != tt.want {
t.Errorf("maxWeight() = %v, want %v", got, tt.want)
}
})
}
}
func TestNode_confirmed(t *testing.T) {
cfg := params.BeaconConfig()
cfg.FastConfirmationByzantineThreshold = 33
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
type fields struct {
nodeSlot primitives.Slot
weight uint64
root [32]byte
bestDescendant *Node
}
type args struct {
slot primitives.Slot
committeeWeight uint64
pbRoot [32]byte
pbValue uint64
}
tests := []struct {
name string
fields fields
args args
want bool
}{
{
name: "node slot > slot returns false",
fields: fields{
nodeSlot: 10,
},
args: args{
slot: 9,
},
want: false,
},
{
name: "weight without boost <= threshold returns false",
fields: fields{
weight: 186, // 200 committee weight, 40 pb weight, 66 byzantine weight
bestDescendant: &Node{},
},
args: args{
slot: 1,
committeeWeight: 100,
},
want: false,
},
{
name: "weight without boost > threshold returns true",
fields: fields{
weight: 187, // 200 committee weight, 40 pb weight, 66 byzantine weight
bestDescendant: &Node{},
},
args: args{
slot: 1,
committeeWeight: 100,
},
want: true,
},
{
name: "node root matches pbRoot but balance < pbValue returns false",
fields: fields{
weight: 187,
bestDescendant: &Node{
root: [32]byte{1},
},
},
args: args{
slot: 1,
committeeWeight: 100,
pbRoot: [32]byte{1},
pbValue: 100000000,
},
want: false,
},
{
name: "node root matches pbRoot, balance >= pbValue, adjusted weight <= threshold returns false",
fields: fields{
weight: 187,
bestDescendant: &Node{
root: [32]byte{1},
},
},
args: args{
slot: 1,
committeeWeight: 100,
pbRoot: [32]byte{1},
pbValue: 1,
},
want: false,
},
{
name: "node root matches pbRoot (self), balance >= pbValue, adjusted weight <= threshold returns false",
fields: fields{
weight: 187,
root: [32]byte{1},
},
args: args{
slot: 1,
committeeWeight: 100,
pbRoot: [32]byte{1},
pbValue: 1,
},
want: false,
},
{
name: "node root matches pbRoot, balance >= pbValue, adjusted weight > threshold returns true",
fields: fields{
weight: 188,
bestDescendant: &Node{
root: [32]byte{1},
},
},
args: args{
slot: 1,
committeeWeight: 100,
pbRoot: [32]byte{1},
pbValue: 1,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
n := &Node{
slot: tt.fields.nodeSlot,
weight: tt.fields.weight,
root: tt.fields.root,
bestDescendant: tt.fields.bestDescendant,
}
if got := n.confirmed(tt.args.slot, tt.args.committeeWeight, tt.args.pbRoot, tt.args.pbValue); got != tt.want {
t.Errorf("confirmed() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -62,6 +62,39 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
return bestDescendant.root, nil
}
// safeHead starts from justified root and then follows the best descendant links
// to find the best safe head block.
func (s *Store) safeHead(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.safeHead")
defer span.End()
if err := ctx.Err(); err != nil {
return [32]byte{}, err
}
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
justifiedNode = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
}
// If the justified node doesn't have a best confirmed descendant,
// the safe head is the justified node itself.
bestConfirmedDescendant := justifiedNode.bestConfirmedDescendant
if bestConfirmedDescendant == nil {
bestConfirmedDescendant = justifiedNode
}
return bestConfirmedDescendant.root, nil
}
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
@@ -146,7 +179,18 @@ func (s *Store) insert(ctx context.Context,
// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
secondsSinceSlotStart, err := slots.SinceSlotStart(currentSlot, s.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
secondsSinceSlotStart = 0
}
if err := s.treeRootNode.updateBestDescendant(ctx, &updateDescendantArgs{
justifiedEpoch: jEpoch,
finalizedEpoch: fEpoch,
currentSlot: currentSlot,
secondsSinceSlotStart: secondsSinceSlotStart,
committeeWeight: s.committeeWeight,
}); err != nil {
_, remErr := s.removeNode(ctx, n)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")

View File

@@ -630,3 +630,110 @@ func TestStore_HighestReceivedBlockDelay(t *testing.T) {
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
}
func TestStore_safeHead(t *testing.T) {
genesisEpoch := params.BeaconConfig().GenesisEpoch
root1 := [32]byte{0x01}
root2 := [32]byte{0x02}
root3 := [32]byte{0x03}
tests := []struct {
name string
setupStore func() *Store
wantRoot [32]byte
expectErr bool
expectedErr error
}{
{
name: "context cancelled returns error",
setupStore: func() *Store {
return &Store{}
},
wantRoot: [32]byte{},
expectErr: true,
},
{
name: "justified root missing and not genesis returns error",
setupStore: func() *Store {
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: 1,
Root: root1,
},
nodeByRoot: make(map[[32]byte]*Node),
}
},
wantRoot: [32]byte{},
expectErr: true,
},
{
name: "justified is genesis, uses tree root",
setupStore: func() *Store {
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: genesisEpoch,
Root: [32]byte{}, // zero hash
},
nodeByRoot: map[[32]byte]*Node{},
treeRootNode: &Node{root: root2},
}
},
wantRoot: root2,
expectErr: false,
},
{
name: "justified exists with no best confirmed descendant",
setupStore: func() *Store {
node := &Node{root: root1}
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: 1,
Root: root1,
},
nodeByRoot: map[[32]byte]*Node{
root1: node,
},
}
},
wantRoot: root1,
expectErr: false,
},
{
name: "justified exists with best confirmed descendant",
setupStore: func() *Store {
descendant := &Node{root: root3}
node := &Node{root: root1, bestConfirmedDescendant: descendant}
return &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{
Epoch: 2,
Root: root1,
},
nodeByRoot: map[[32]byte]*Node{
root1: node,
},
}
},
wantRoot: root3,
expectErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
store := tt.setupStore()
ctx := context.Background()
if tt.name == "context cancelled returns error" {
c, cancel := context.WithCancel(ctx)
cancel()
ctx = c
}
got, err := store.safeHead(ctx)
if (err != nil) != tt.expectErr {
t.Fatalf("expected error: %v, got: %v", tt.expectErr, err)
}
if err == nil && got != tt.wantRoot {
t.Errorf("safeHead() = %x, want %x", got, tt.wantRoot)
}
})
}
}

View File

@@ -28,6 +28,7 @@ type Store struct {
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
safeHeadRoot [fieldparams.RootLength]byte // safe head root in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
@@ -62,6 +63,7 @@ type Node struct {
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp time.Time // The timestamp when the node was inserted.
bestConfirmedDescendant *Node // best descendant of this node that also satisfies the fast confirmation rule.
}
// Vote defines an individual validator's vote.

View File

@@ -83,6 +83,7 @@ type FastGetter interface {
DependentRoot(primitives.Epoch) ([32]byte, error)
TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
UnrealizedJustifiedPayloadBlockHash() [32]byte
SafeBlockHash() [32]byte
Weight(root [32]byte) (uint64, error)
ParentRoot(root [32]byte) ([32]byte, error)
}

View File

@@ -100,6 +100,13 @@ func (ro *ROForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return ro.getter.UnrealizedJustifiedPayloadBlockHash()
}
// SafeBlockHash delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) SafeBlockHash() [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.SafeBlockHash()
}
// NodeCount delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) NodeCount() int {
ro.l.RLock()

View File

@@ -27,6 +27,7 @@ const (
previousJustifiedCheckpointCalled
justifiedPayloadBlockHashCalled
unrealizedJustifiedPayloadBlockHashCalled
safeBlockHashCalled
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
@@ -107,6 +108,11 @@ func TestROLocking(t *testing.T) {
call: unrealizedJustifiedPayloadBlockHashCalled,
cb: func(g FastGetter) { g.UnrealizedJustifiedPayloadBlockHash() },
},
{
name: "safeBlockHashCalled",
call: safeBlockHashCalled,
cb: func(g FastGetter) { g.SafeBlockHash() },
},
{
name: "nodeCountCalled",
call: nodeCountCalled,
@@ -249,6 +255,11 @@ func (ro *mockROForkchoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}
func (ro *mockROForkchoice) SafeBlockHash() [32]byte {
ro.calls = append(ro.calls, safeBlockHashCalled)
return [32]byte{}
}
func (ro *mockROForkchoice) NodeCount() int {
ro.calls = append(ro.calls, nodeCountCalled)
return 0

View File

@@ -195,3 +195,32 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
return params.SetActive(c)
}
func configureSafeBlockConfig(cliCtx *cli.Context) error {
c := params.BeaconConfig().Copy()
if cliCtx.IsSet(flags.FastConfirmationByzantineThreshold.Name) {
threshold := cliCtx.Uint64(flags.FastConfirmationByzantineThreshold.Name)
if threshold > 100 {
return fmt.Errorf("fast-confirmation-byzantine-threshold must be between 0 and 100")
}
c.FastConfirmationByzantineThreshold = threshold
if err := params.SetActive(c); err != nil {
return err
}
}
if cliCtx.IsSet(flags.SafeBlock.Name) {
safeBlock := cliCtx.String(flags.SafeBlock.Name)
switch safeBlock {
case "justified", "unrealized-justified", "fast-confirmation":
default:
return fmt.Errorf("invalid safe-block option: %s", safeBlock)
}
c.SafeBlockAlgorithm = safeBlock
if err := params.SetActive(c); err != nil {
return err
}
}
return nil
}

View File

@@ -289,6 +289,10 @@ func configureBeacon(cliCtx *cli.Context) error {
return errors.Wrap(err, "could not configure execution setting")
}
if err := configureSafeBlockConfig(cliCtx); err != nil {
return errors.Wrap(err, "could not configure safe block config")
}
return nil
}

View File

@@ -201,6 +201,7 @@ func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
ExtraData: &structs.ForkChoiceDumpExtraData{
UnrealizedJustifiedCheckpoint: structs.CheckpointFromConsensus(dump.UnrealizedJustifiedCheckpoint),
UnrealizedFinalizedCheckpoint: structs.CheckpointFromConsensus(dump.UnrealizedFinalizedCheckpoint),
SafeHeadRoot: hexutil.Encode(dump.SafeHeadRoot),
ProposerBoostRoot: hexutil.Encode(dump.ProposerBoostRoot),
PreviousProposerBoostRoot: hexutil.Encode(dump.PreviousProposerBoostRoot),
HeadRoot: hexutil.Encode(dump.HeadRoot),

View File

@@ -117,16 +117,16 @@ func (vs *Server) getLocalPayloadFromEngine(
}
finalizedBlockHash := [32]byte{}
justifiedBlockHash := [32]byte{}
safeBlockHash := [32]byte{}
// Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
if st.Version() >= version.Bellatrix {
finalizedBlockHash = vs.FinalizationFetcher.FinalizedBlockHash()
justifiedBlockHash = vs.FinalizationFetcher.UnrealizedJustifiedPayloadBlockHash()
safeBlockHash = vs.ForkchoiceFetcher.SafeBlockHash()
}
f := &enginev1.ForkchoiceState{
HeadBlockHash: parentHash,
SafeBlockHash: justifiedBlockHash[:],
SafeBlockHash: safeBlockHash[:],
FinalizedBlockHash: finalizedBlockHash[:],
}

View File

@@ -151,6 +151,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr, GetPayloadResponse: &blocks.GetPayloadResponse{ExecutionData: ed, OverrideBuilder: tt.override}},
HeadFetcher: &chainMock.ChainService{State: tt.st},
FinalizationFetcher: &chainMock.ChainService{},
ForkchoiceFetcher: &chainMock.ChainService{},
BeaconDB: beaconDB,
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
@@ -252,6 +253,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
},
HeadFetcher: &chainMock.ChainService{State: transitionSt},
FinalizationFetcher: &chainMock.ChainService{},
ForkchoiceFetcher: &chainMock.ChainService{},
BeaconDB: beaconDB,
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),

View File

@@ -0,0 +1,3 @@
### Added
- Implement fast confirmation algorithm for Prysm.

View File

@@ -333,6 +333,7 @@ var (
Usage: "Specifies the retention period for the pruner service in terms of epochs. " +
"If this value is less than MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored.",
}
// SubscribeAllDataSubnets enables subscription to all data subnets.
SubscribeAllDataSubnets = &cli.BoolFlag{
Name: "subscribe-all-data-subnets",
@@ -344,4 +345,18 @@ var (
Usage: "Maximum number of signatures to batch verify at once for beacon attestation p2p gossip.",
Value: 1000,
}
FastConfirmationByzantineThreshold = &cli.Uint64Flag{
Name: "fast-confirmation-byzantine-threshold",
Usage: "Byzantine threshold percentage (0-100) used for fast confirmation",
Value: 33,
Aliases: []string{"fc-threshold"},
}
SafeBlock = &cli.StringFlag{
Name: "safe-block",
Usage: "Algorithm for safe block selection: justified, unrealized-justified, or fast-confirmation",
Value: "unrealized-justified",
Aliases: []string{"sb"},
}
)

View File

@@ -149,6 +149,8 @@ var appFlags = []cli.Flag{
bflags.BackfillWorkerCount,
bflags.BackfillOldestSlot,
flags.BatchVerifierLimit,
flags.FastConfirmationByzantineThreshold,
flags.SafeBlock,
}
func init() {

View File

@@ -163,6 +163,8 @@ var appHelpFlagGroups = []flagGroup{
flags.ExecutionJWTSecretFlag,
flags.JwtId,
flags.InteropMockEth1DataVotesFlag,
flags.FastConfirmationByzantineThreshold,
flags.SafeBlock,
},
},
{ // Flags relevant to configuring beacon chain monitoring.

View File

@@ -314,6 +314,10 @@ type BeaconChainConfig struct {
// DeprecatedMaxBlobsPerBlockFulu defines the max blobs that could exist in a block post Fulu hard fork.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlockFulu int `yaml:"MAX_BLOBS_PER_BLOCK_FULU" spec:"true"`
// Safe block config values.
FastConfirmationByzantineThreshold uint64 // FastConfirmationByzantineThreshold is the Byzantine threshold percentage (0-100) used for fast confirmation.
SafeBlockAlgorithm string // SafeBlockAlgorithm is the algorithm used for safe block selection.
}
func (b *BeaconChainConfig) VersionToForkEpochMap() map[int]primitives.Epoch {

View File

@@ -33,6 +33,7 @@ type Dump struct {
FinalizedCheckpoint *eth.Checkpoint
UnrealizedJustifiedCheckpoint *eth.Checkpoint
UnrealizedFinalizedCheckpoint *eth.Checkpoint
SafeHeadRoot []byte
ProposerBoostRoot []byte
PreviousProposerBoostRoot []byte
HeadRoot []byte