Compare commits

..

2 Commits

Author SHA1 Message Date
potuz
0b48daf14a Gloas Initial forkchoice implementation
This is the bare minimum to make the **current fork** logic
compatible with having full/empty slots in forkchoice. That is,
forkchoice is designed as if we had the option to have full/empty slots,
but we still create the full node when we import a block pre-gloas.

No PTC and no ordering between empty/full is implemented yet; it just
always chooses the full child between the two.

The *Node structure corresponds to the `PAYLOAD_PENDING_STATUS` in the
spec, while the enclosing *PayloadNode structures are the full
forkchoice nodes that have either full or empty status.

The most complicated path to check in this PR is the working of
`SetOptimisticToInvalid`, which handles invalid block insertion from the
EL. There are several cases: the passed root may or may not exist in
forkchoice, and the invalid node may have been built on top of a full or
an empty node. This is particularly ugly because, if the node is not in
forkchoice, we still need to find out whether its parent was full or
empty (a sketch of this disambiguation follows the relevant diff below).

- [x] compiling beacon chain
- [ ] fix tests
2026-02-06 15:49:11 -03:00
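
For orientation, here is a minimal Go sketch of the structure split described above. The field names are simplified assumptions rather than the exact Prysm definitions; only the two store maps appear verbatim in the forkchoice diff below.

// A Node is the consensus block, corresponding to PAYLOAD_PENDING_STATUS in
// the spec. Each PayloadNode wraps a Node with full or empty payload status,
// so parent pointers alternate between the two layers.
type Node struct {
	root        [32]byte
	payloadHash [32]byte     // block hash committed to by the bid
	parent      *PayloadNode // the full or empty node this block builds on
}

type PayloadNode struct {
	node     *Node
	full     bool    // true once the payload for this block is revealed
	children []*Node // consensus blocks building on this variant
}

type Store struct {
	emptyNodeByRoot map[[32]byte]*PayloadNode // one entry per known block root
	fullNodeByRoot  map[[32]byte]*PayloadNode // present only once a payload exists
}

Pre-gloas blocks still get a full node created on import, which is what keeps the current fork logic compatible.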
potuz
dcb8dc2b75 Remove unused method in forkchoice 2026-02-06 15:07:29 -03:00
47 changed files with 1948 additions and 3629 deletions

View File

@@ -150,36 +150,3 @@ type ActiveSetChanges struct {
EjectedPublicKeys []string `json:"ejected_public_keys"`
EjectedIndices []string `json:"ejected_indices"`
}
// =============================================================================
// GLOAS Fork Types - Execution Payload Envelope
// =============================================================================
// Note: Block-related GLOAS types (BeaconBlockGloas, ExecutionPayloadBid, etc.)
// are defined in block.go. This file contains only the envelope types used for
// the validator API endpoints.
// ExecutionPayloadEnvelope represents an execution payload envelope in the GLOAS fork.
// This wraps the full execution payload with builder metadata for separate signing
// and broadcasting by validators.
type ExecutionPayloadEnvelope struct {
Payload json.RawMessage `json:"payload"` // ExecutionPayloadDeneb
ExecutionRequests json.RawMessage `json:"execution_requests"` // ExecutionRequests
BuilderIndex string `json:"builder_index"` // uint64 as string
BeaconBlockRoot string `json:"beacon_block_root"` // hex encoded 32 bytes
Slot string `json:"slot"` // uint64 as string
BlobKzgCommitments []string `json:"blob_kzg_commitments"` // list of hex encoded 48-byte commitments
StateRoot string `json:"state_root"` // hex encoded 32 bytes
}
// SignedExecutionPayloadEnvelope wraps an execution payload envelope with a BLS signature.
// The signature is provided by the validator after retrieving the envelope from the beacon node.
type SignedExecutionPayloadEnvelope struct {
Message *ExecutionPayloadEnvelope `json:"message"`
Signature string `json:"signature"` // hex encoded 96-byte BLS signature
}
// GetExecutionPayloadEnvelopeResponse is the response for retrieving a cached execution payload envelope.
type GetExecutionPayloadEnvelopeResponse struct {
Version string `json:"version"`
Data *ExecutionPayloadEnvelope `json:"data"`
}

View File

@@ -101,7 +101,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
// this call is guaranteed to have the `headRoot` with its payload in forkchoice.
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), [32]byte(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
@@ -290,10 +291,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
if err != nil {
return err
}

View File

@@ -232,7 +232,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
// this call does not have the root in forkchoice yet.
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -992,9 +993,9 @@ func (s *Service) waitForSync() error {
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
}
return err
}
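
The new `parentHash` argument threaded through `handleInvalidExecutionError` and `pruneInvalidBlock` exists for the case called out in the commit message: the invalid root may not be in forkchoice at all, so the payload's parent hash is the only way to tell whether the block was built on the full or the empty variant of its parent. A hedged sketch of that check, using the simplified types from the sketch above (hypothetical helper, not Prysm code):

// parentVariant picks the payload node an invalid block built on, even
// when the block itself was never inserted into forkchoice. If the
// block's execution parent hash matches the parent's revealed payload,
// it built on the full variant; otherwise it built on the empty one.
func (s *Store) parentVariant(parentRoot, parentHash [32]byte) *PayloadNode {
	if fn, ok := s.fullNodeByRoot[parentRoot]; ok && fn.node.payloadHash == parentHash {
		return fn
	}
	return s.emptyNodeByRoot[parentRoot]
}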

View File

@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.ParentHash()))
s.cfg.ForkChoiceStore.Unlock()
return false, err
}

View File

@@ -15,7 +15,6 @@ go_library(
"common.go",
"doc.go",
"error.go",
"execution_payload_envelope.go",
"interfaces.go",
"log.go",
"payload_id.go",
@@ -52,7 +51,6 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
@@ -78,7 +76,6 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"execution_payload_envelope_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
@@ -100,7 +97,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -1,111 +0,0 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// ExecutionPayloadEnvelopeKey uniquely identifies a cached execution payload envelope.
type ExecutionPayloadEnvelopeKey struct {
Slot primitives.Slot
BuilderIndex primitives.BuilderIndex
}
// executionPayloadEnvelopeCacheEntry holds an execution payload envelope and
// the associated blobs bundle from the EL. The blobs bundle is needed later
// when proposing the block to build and broadcast blob sidecars.
type executionPayloadEnvelopeCacheEntry struct {
envelope *ethpb.ExecutionPayloadEnvelope
blobsBundle enginev1.BlobsBundler
}
// ExecutionPayloadEnvelopeCache stores execution payload envelopes produced during
// GLOAS block building for later retrieval by validators. When a beacon node
// produces a GLOAS block, it caches the execution payload envelope so the validator
// can retrieve it, sign it, and broadcast it separately from the beacon block.
// The blobs bundle from the EL is also cached alongside, since blobs are only
// persisted to the DB after they are broadcast as sidecars during block proposal.
type ExecutionPayloadEnvelopeCache struct {
cache map[ExecutionPayloadEnvelopeKey]*executionPayloadEnvelopeCacheEntry
sync.RWMutex
}
// NewExecutionPayloadEnvelopeCache creates a new execution payload envelope cache.
func NewExecutionPayloadEnvelopeCache() *ExecutionPayloadEnvelopeCache {
return &ExecutionPayloadEnvelopeCache{
cache: make(map[ExecutionPayloadEnvelopeKey]*executionPayloadEnvelopeCacheEntry),
}
}
// Get retrieves an execution payload envelope by slot and builder index.
// Returns the envelope and true if found, nil and false otherwise.
func (c *ExecutionPayloadEnvelopeCache) Get(slot primitives.Slot, builderIndex primitives.BuilderIndex) (*ethpb.ExecutionPayloadEnvelope, bool) {
c.RLock()
defer c.RUnlock()
key := ExecutionPayloadEnvelopeKey{
Slot: slot,
BuilderIndex: builderIndex,
}
entry, ok := c.cache[key]
if !ok {
return nil, false
}
return entry.envelope, true
}
// GetBlobsBundle retrieves a cached blobs bundle by slot and builder index.
// Returns the blobs bundle and true if found, nil and false otherwise.
func (c *ExecutionPayloadEnvelopeCache) GetBlobsBundle(slot primitives.Slot, builderIndex primitives.BuilderIndex) (enginev1.BlobsBundler, bool) {
c.RLock()
defer c.RUnlock()
key := ExecutionPayloadEnvelopeKey{
Slot: slot,
BuilderIndex: builderIndex,
}
entry, ok := c.cache[key]
if !ok {
return nil, false
}
return entry.blobsBundle, true
}
// Set stores an execution payload envelope and its associated blobs bundle in the cache.
// The envelope's slot and builder_index fields are used as the cache key.
// Old entries are automatically pruned to prevent unbounded growth.
func (c *ExecutionPayloadEnvelopeCache) Set(envelope *ethpb.ExecutionPayloadEnvelope, blobsBundle enginev1.BlobsBundler) {
if envelope == nil {
return
}
c.Lock()
defer c.Unlock()
slot := envelope.Slot
if slot > 2 {
c.prune(slot - 2)
}
key := ExecutionPayloadEnvelopeKey{
Slot: slot,
BuilderIndex: envelope.BuilderIndex,
}
c.cache[key] = &executionPayloadEnvelopeCacheEntry{
envelope: envelope,
blobsBundle: blobsBundle,
}
}
// prune removes all entries with slots older than the given slot.
// Must be called with the lock held.
func (c *ExecutionPayloadEnvelopeCache) prune(slot primitives.Slot) {
for key := range c.cache {
if key.Slot < slot {
delete(c.cache, key)
}
}
}

View File

@@ -1,146 +0,0 @@
package cache
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestExecutionPayloadEnvelopeCache_GetSet(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()
// Test empty cache returns false
_, ok := cache.Get(1, 0)
require.Equal(t, false, ok, "expected empty cache to return false")
// Create test envelope
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: primitives.Slot(100),
BuilderIndex: primitives.BuilderIndex(5),
StateRoot: make([]byte, 32),
}
// Set and retrieve
cache.Set(envelope, nil)
retrieved, ok := cache.Get(100, 5)
require.Equal(t, true, ok, "expected to find cached envelope")
require.Equal(t, envelope.Slot, retrieved.Slot)
require.Equal(t, envelope.BuilderIndex, retrieved.BuilderIndex)
// Different builder index should not find it
_, ok = cache.Get(100, 6)
require.Equal(t, false, ok, "expected different builder index to return false")
// Different slot should not find it
_, ok = cache.Get(101, 5)
require.Equal(t, false, ok, "expected different slot to return false")
}
func TestExecutionPayloadEnvelopeCache_Prune(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()
// Add envelopes at slots 10, 11, 12 (close enough that none get pruned).
// Prune removes entries with slot < (new_slot - 2), so inserting 10, 11, 12
// keeps all three: slot 12 prunes < 10, but 10 is not < 10.
for i := primitives.Slot(10); i <= 12; i++ {
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: i,
BuilderIndex: 0,
StateRoot: make([]byte, 32),
}
cache.Set(envelope, nil)
}
// Verify all are present
for i := primitives.Slot(10); i <= 12; i++ {
_, ok := cache.Get(i, 0)
require.Equal(t, true, ok, "expected envelope at slot %d", i)
}
// Add envelope at slot 20, should prune slots < 18
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: 20,
BuilderIndex: 0,
StateRoot: make([]byte, 32),
}
cache.Set(envelope, nil)
// Slots 10-12 should be pruned (all < 18)
for i := primitives.Slot(10); i <= 12; i++ {
_, ok := cache.Get(i, 0)
require.Equal(t, false, ok, "expected envelope at slot %d to be pruned", i)
}
// Slot 20 should still be present
_, ok := cache.Get(20, 0)
require.Equal(t, true, ok, "expected envelope at slot 20 to be present")
}
func TestExecutionPayloadEnvelopeCache_NilEnvelope(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()
// Setting nil should not panic or add entry
cache.Set(nil, nil)
// Cache should still be empty
require.Equal(t, 0, len(cache.cache))
}
func TestExecutionPayloadEnvelopeCache_MultipleBuilders(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()
slot := primitives.Slot(100)
// Add envelopes from multiple builders at same slot
for i := range primitives.BuilderIndex(3) {
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: slot,
BuilderIndex: i,
StateRoot: make([]byte, 32),
}
cache.Set(envelope, nil)
}
// All should be retrievable
for i := range primitives.BuilderIndex(3) {
retrieved, ok := cache.Get(slot, i)
require.Equal(t, true, ok, "expected to find envelope for builder %d", i)
require.Equal(t, i, retrieved.BuilderIndex)
}
}
func TestExecutionPayloadEnvelopeCache_BlobsBundle(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()
slot := primitives.Slot(100)
builderIndex := primitives.BuilderIndex(5)
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: slot,
BuilderIndex: builderIndex,
StateRoot: make([]byte, 32),
}
bundle := &enginev1.BlobsBundle{
KzgCommitments: [][]byte{{1, 2, 3}},
Proofs: [][]byte{{4, 5, 6}},
Blobs: [][]byte{{7, 8, 9}},
}
cache.Set(envelope, bundle)
// Retrieve blobs bundle
retrieved, ok := cache.GetBlobsBundle(slot, builderIndex)
require.Equal(t, true, ok, "expected to find cached blobs bundle")
require.NotNil(t, retrieved)
b, ok := retrieved.(*enginev1.BlobsBundle)
require.Equal(t, true, ok)
require.Equal(t, 1, len(b.KzgCommitments))
require.DeepEqual(t, []byte{1, 2, 3}, b.KzgCommitments[0])
// Nil blobs bundle for missing key
_, ok = cache.GetBlobsBundle(slot, 99)
require.Equal(t, false, ok, "expected missing key to return false")
}

View File

@@ -23,13 +23,11 @@ var (
var (
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
_ ConstructionPopulator = (*EnvelopeReconstructionSource)(nil)
)
const (
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
EnvelopeType = "ExecutionPayloadEnvelope"
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
)
type (
@@ -51,20 +49,11 @@ type (
blocks.ROBlock
}
// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
// DataColumnSidecar is a ConstructionPopulator that uses a data column sidecar as the source of data
SidecarReconstructionSource struct {
blocks.VerifiedRODataColumn
}
// EnvelopeReconstructionSource is a ConstructionPopulator for GLOAS+ where
// blob KZG commitments are in the execution payload envelope rather than the
// block body. It uses the block for the header and root, but overrides
// commitments with those from the envelope.
EnvelopeReconstructionSource struct {
blocks.ROBlock
commitments [][]byte
}
blockInfo struct {
signedBlockHeader *ethpb.SignedBeaconBlockHeader
kzgCommitments [][]byte
@@ -82,13 +71,6 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}
// PopulateFromEnvelope creates an EnvelopeReconstructionSource from a block and
// the KZG commitments from the execution payload envelope. This is used for GLOAS+
// where commitments are in the envelope rather than the block body.
func PopulateFromEnvelope(block blocks.ROBlock, commitments [][]byte) *EnvelopeReconstructionSource {
return &EnvelopeReconstructionSource{ROBlock: block, commitments: commitments}
}
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -272,46 +254,3 @@ func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
return info, nil
}
// Slot returns the slot of the source
func (s *EnvelopeReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()
}
// ProposerIndex returns the proposer index of the source
func (s *EnvelopeReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
return s.Block().ProposerIndex()
}
// Commitments returns the blob KZG commitments from the envelope.
func (s *EnvelopeReconstructionSource) Commitments() ([][]byte, error) {
return s.commitments, nil
}
// Type returns the type of the source
func (s *EnvelopeReconstructionSource) Type() string {
return EnvelopeType
}
// extract extracts the block information from the source, using commitments
// from the envelope rather than the block body.
func (s *EnvelopeReconstructionSource) extract() (*blockInfo, error) {
header, err := s.Header()
if err != nil {
return nil, errors.Wrap(err, "header")
}
// TODO: Implement proper merkle proof for envelope commitments.
// In GLOAS, commitments are in the envelope not the block body,
// so the inclusion proof structure differs from pre-GLOAS forks.
inclusionProof := make([][]byte, 4)
for i := range inclusionProof {
inclusionProof[i] = make([]byte, 32)
}
return &blockInfo{
signedBlockHeader: header,
kzgCommitments: s.commitments,
kzgInclusionProof: inclusionProof,
}, nil
}

View File

@@ -6,6 +6,7 @@ go_library(
"doc.go",
"errors.go",
"forkchoice.go",
"gloas.go",
"log.go",
"metrics.go",
"node.go",
@@ -32,6 +33,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -31,7 +31,8 @@ func New() *ForkChoice {
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}
@@ -43,7 +44,7 @@ func New() *ForkChoice {
// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
return len(f.store.nodeByRoot)
return len(f.store.emptyNodeByRoot)
}
// Head returns the head root from fork choice store.
@@ -64,14 +65,14 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)
@@ -118,14 +119,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
jc, fc = f.store.pullTips(state, node, jc, fc)
jc, fc = f.store.pullTips(state, pn.node, jc, fc)
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
_, remErr := f.store.removeNode(ctx, node)
_, remErr := f.store.removeNode(ctx, pn)
if remErr != nil {
log.WithError(remErr).Error("Could not remove node")
}
@@ -156,27 +157,32 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
// HasNode returns true if the node exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
_, ok := f.store.nodeByRoot[root]
_, ok := f.store.emptyNodeByRoot[root]
return ok
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// It is fine to pick the empty node here since we only check whether the beacon block is canonical.
pn, ok := f.store.emptyNodeByRoot[root]
if !ok || pn == nil {
return false
}
if node.bestDescendant == nil {
if pn.node.bestDescendant == nil {
// The node doesn't have any children
if f.store.headNode.bestDescendant == nil {
return node == f.store.headNode
// headNode is itself the head.
return pn.node == f.store.headNode
}
return node == f.store.headNode.bestDescendant
// headNode is not up to date and there are descendants
return pn.node == f.store.headNode.bestDescendant
}
// The node has children
if f.store.headNode.bestDescendant == nil {
return node.bestDescendant == f.store.headNode
return pn.node.bestDescendant == f.store.headNode
}
return node.bestDescendant == f.store.headNode.bestDescendant
return pn.node.bestDescendant == f.store.headNode.bestDescendant
}
// IsOptimistic returns true if the given root has been optimistically synced.
@@ -185,7 +191,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
return true, nil
}
node, ok := f.store.nodeByRoot[root]
node, ok := f.store.emptyNodeByRoot[root]
if !ok || node == nil {
return true, ErrNilNode
}
@@ -198,17 +204,21 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
defer span.End()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
pn, ok := f.store.emptyNodeByRoot[root]
if !ok || pn == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}
n := node
for n != nil && n.slot > slot {
n := pn.node
for n.slot > slot {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
n = n.parent
if n.parent == nil {
n = nil
break
}
n = n.parent.node
}
if n == nil {
@@ -221,10 +231,11 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
pn, ok := f.store.emptyNodeByRoot[cp.Root]
if !ok || pn == nil {
return false, nil
}
node := pn.node
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
@@ -233,10 +244,13 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return false, nil
}
if len(node.children) == 0 {
// If it's the start of the epoch, it is a checkpoint
if node.slot == epochStart {
return true, nil
}
if node.slot == epochStart {
// If there are no descendants of this beacon block, it is viable as a checkpoint
children := f.store.allConsensusChildren(node)
if len(children) == 0 {
return true, nil
}
if !features.Get().IgnoreUnviableAttestations {
@@ -246,7 +260,8 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return true, nil
}
}
for _, child := range node.children {
// If some child is after the start of the epoch, the checkpoint is viable.
for _, child := range children {
if child.slot > epochStart {
return true, nil
}
@@ -287,7 +302,7 @@ func (f *ForkChoice) updateBalances() error {
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
@@ -296,7 +311,7 @@ func (f *ForkChoice) updateBalances() error {
nextNode.balance += newBalance
}
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
@@ -337,13 +352,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
// SetOptimisticToValid sets the node with the given root as a fully validated node
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
fn, ok := f.store.fullNodeByRoot[root]
if !ok || fn == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
}
return node.setNodeAndParentValidated(ctx)
return f.store.setNodeAndParentValidated(ctx, fn)
}
// PreviousJustifiedCheckpoint of fork choice store.
@@ -362,8 +377,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
}
// InsertSlashedIndex adds the given slashed validator index to the
@@ -386,7 +401,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
return
}
node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
if !ok || node == nil {
return
}
@@ -421,22 +436,28 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
}
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
// This is payload aware. Consider the following situation:
// [A,full] <--- [B, full] <---[C,pending]
//
// \---------[B, empty] <--[D, pending]
//
// Then even though C and D both descend from the beacon block B, their common ancestor is A.
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
defer span.End()
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
en1, ok := f.store.emptyNodeByRoot[r1]
if !ok || en1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, n1.slot, nil
return r1, en1.node.slot, nil
}
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
en2, ok := f.store.emptyNodeByRoot[r2]
if !ok || en2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
@@ -444,23 +465,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
if ctx.Err() != nil {
return [32]byte{}, 0, ctx.Err()
}
if n1.slot > n2.slot {
n1 = n1.parent
if en1.node.slot > en2.node.slot {
en1 = en1.node.parent
// Reaches the end of the tree and unable to find common ancestor.
// This should not happen at runtime as the finalized
// node has to be a common ancestor
if n1 == nil {
if en1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 = n2.parent
en2 = en2.node.parent
// Reaches the end of the tree and unable to find common ancestor.
if n2 == nil {
if en2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
}
if n1 == n2 {
return n1.root, n1.slot, nil
if en1 == en2 {
return en1.node.root, en1.node.slot, nil
}
}
}
@@ -507,35 +528,17 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
root := f.FinalizedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
}
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
root := f.JustifiedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
}
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
root := f.store.unrealizedJustifiedCheckpoint.Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
}
// ForkChoiceDump returns a full dump of forkchoice.
@@ -559,7 +562,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
if err != nil {
return nil, err
}
@@ -588,7 +591,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
n, ok := f.store.nodeByRoot[root]
n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
@@ -616,11 +619,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.nodeByRoot[root]
n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.slot, nil
return n.node.slot, nil
}
// DependentRoot returns the last root of the epoch prior to the requested epoch in the canonical chain.
@@ -628,7 +631,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
}
// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
// DependentRootForEpoch returns the last root of the epoch prior to the requested epoch for the given root.
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
tr, err := f.TargetRootForEpoch(root, epoch)
if err != nil {
@@ -637,18 +640,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
if tr == [32]byte{} {
return [32]byte{}, nil
}
node, ok := f.store.nodeByRoot[tr]
if !ok || node == nil {
en, ok := f.store.emptyNodeByRoot[tr]
if !ok || en == nil {
return [32]byte{}, ErrNilNode
}
if slots.ToEpoch(node.slot) >= epoch {
if node.parent != nil {
node = node.parent
if slots.ToEpoch(en.node.slot) >= epoch {
if en.node.parent != nil {
en = en.node.parent
} else {
return f.store.finalizedDependentRoot, nil
}
}
return node.root, nil
return en.node.root, nil
}
// TargetRootForEpoch returns the root of the target block for a given epoch.
@@ -660,46 +663,48 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
// which case we return the root of the checkpoint of the chain containing the
// passed root, at the given epoch
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
n, ok := f.store.nodeByRoot[root]
n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
nodeEpoch := slots.ToEpoch(n.slot)
node := n.node
nodeEpoch := slots.ToEpoch(node.slot)
if epoch > nodeEpoch {
return n.root, nil
return node.root, nil
}
if n.target == nil {
if node.target == nil {
return [32]byte{}, nil
}
targetRoot := n.target.root
targetRoot := node.target.root
if epoch == nodeEpoch {
return targetRoot, nil
}
targetNode, ok := f.store.nodeByRoot[targetRoot]
targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
if !ok || targetNode == nil {
return [32]byte{}, ErrNilNode
}
// If slot 0 was not missed we consider a previous block to go back at least one epoch
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
targetNode = targetNode.parent
if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
targetNode = targetNode.node.parent
if targetNode == nil {
return [32]byte{}, ErrNilNode
}
}
return f.TargetRootForEpoch(targetNode.root, epoch)
return f.TargetRootForEpoch(targetNode.node.root, epoch)
}
// ParentRoot returns the block root of the parent node if it is in forkchoice.
// The exception is the finalized checkpoint root, for which we return
// the zero hash.
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
n, ok := f.store.nodeByRoot[root]
n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
// Return the zero hash for the tree root
if n.parent == nil {
parent := n.node.parent
if parent == nil {
return [32]byte{}, nil
}
return n.parent.root, nil
return parent.node.root, nil
}
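
To make the payload-aware CommonAncestor concrete, here is a worked trace of the diagram in the doc comment, under the simplified types from the sketch near the top (assumed setup, not code from this PR):

// C and D both descend from beacon block B, but through different variants:
//
//	aFull  := &PayloadNode{node: A, full: true}
//	bFull  := &PayloadNode{node: B, full: true}  // B.parent == aFull
//	bEmpty := &PayloadNode{node: B, full: false} // B's empty variant
//	C.parent, D.parent = bFull, bEmpty
//
// The loop above compares PayloadNode pointers while stepping through
// en.node.parent. From C it visits bFull, then aFull; from D it visits
// bEmpty, then aFull. The pointers first coincide at aFull, so the common
// ancestor is A rather than B, exactly as the doc comment states.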

View File

@@ -104,9 +104,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
s := f.store
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
@@ -122,9 +122,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{100, 100, 100}
f.votes = []Vote{
@@ -135,9 +135,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
@@ -153,9 +153,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
@@ -166,9 +166,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
@@ -224,10 +224,10 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
@@ -260,7 +260,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
@@ -342,21 +342,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Process b's slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
@@ -591,7 +591,7 @@ func TestStore_CommonAncestor(t *testing.T) {
optimistic: true,
}
f.store.nodeByRoot[[32]byte{'y'}] = n
f.store.emptyNodeByRoot[[32]byte{'y'}] = n
// broken link
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
@@ -742,7 +742,7 @@ func TestWeight(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
n, ok := f.store.nodeByRoot[root]
n, ok := f.store.emptyNodeByRoot[root]
require.Equal(t, true, ok)
n.weight = 10
w, err := f.Weight(root)

View File

@@ -0,0 +1,300 @@
package doublylinkedtree
import (
"bytes"
"context"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
func (s *Store) getNodeInformation(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, payloadHash *[32]byte) error {
sb, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
return err
}
wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
if err != nil {
return errors.Wrap(err, "failed to wrap signed bid")
}
bid, err := wb.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid from wrapped bid")
}
*payloadHash = bid.BlockHash()
parentRoot := block.ParentRoot()
*parent = s.emptyNodeByRoot[parentRoot]
if *parent == nil {
// This is the tree root node.
return nil
}
if bid.ParentBlockHash() == (*parent).node.payloadHash {
// block builds on full
*parent = s.fullNodeByRoot[parentRoot]
}
return nil
}
// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
// Recursively calling the children to sum their weights.
en := s.emptyNodeByRoot[n.root]
if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
return err
}
childrenWeight := en.weight
fn := s.fullNodeByRoot[n.root]
if fn != nil {
if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
return err
}
childrenWeight += fn.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
return err
}
childrenWeight += child.weight
}
n.weight = n.balance + childrenWeight
return nil
}
// allConsensusChildren returns the list of all consensus blocks that build on the given node.
func (s *Store) allConsensusChildren(n *Node) []*Node {
en := s.emptyNodeByRoot[n.root]
fn, ok := s.fullNodeByRoot[n.root]
if ok {
return append(en.children, fn.children...)
}
return en.children
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !pn.optimistic {
return nil
}
pn.optimistic = false
if pn.full {
// also set the empty node as valid
en := s.emptyNodeByRoot[pn.node.root]
en.optimistic = false
}
if pn.node.parent == nil {
return nil
}
return s.setNodeAndParentValidated(ctx, pn.node.parent)
}
// fullAncestor returns the highest ancestor with a full payload for a block with the
// given root. If there is a payload for the passed root, it returns that full
// node. Otherwise it returns the latest full parent that is an ancestor of the given root.
func (s *Store) fullAncestor(root [32]byte) *PayloadNode {
fn, ok := s.fullNodeByRoot[root]
if ok {
return fn
}
en := s.emptyNodeByRoot[root]
if en == nil {
return nil
}
return s.fullParent(en)
}
// fullParent returns the latest full node that this block builds on.
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
parent := pn.node.parent
for ; parent != nil && !parent.full; parent = parent.node.parent {
}
return parent
}
// parentHash returns the payload hash of the latest full node that this block builds on.
func (s *Store) parentHash(pn *PayloadNode) [32]byte {
fullParent := s.fullParent(pn)
if fullParent == nil {
return [32]byte{}
}
return fullParent.node.payloadHash
}
// latestHashForRoot returns the latest payload hash for the given block root.
func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
// try to get the full node first
fn, ok := s.fullNodeByRoot[root]
if ok && fn != nil {
return fn.node.payloadHash
}
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
// This should not happen
return [32]byte{}
}
return s.parentHash(en)
}
// updateBestDescendantPayloadNode updates the best descendant of this node and its
// children.
func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
var bestChild *Node
bestWeight := uint64(0)
for _, child := range n.children {
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && bestChild == nil {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if bestChild == nil {
n.bestDescendant = nil
} else {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
}
return nil
}
// updateBestDescendantConsensusNode updates the best descendant of this node and its
// children.
func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(s.allConsensusChildren(n)) == 0 {
n.bestDescendant = nil
return nil
}
en := s.emptyNodeByRoot[n.root]
if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
fn := s.fullNodeByRoot[n.root]
if fn == nil {
n.bestDescendant = en.bestDescendant
return nil
}
// TODO GLOAS: pick between full or empty
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
n.bestDescendant = fn.bestDescendant
return nil
}
// choosePayloadContent chooses between empty or full for the passed consensus node. TODO Gloas: use PTC to choose.
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
if n == nil {
return nil
}
fn := s.fullNodeByRoot[n.root]
if fn != nil {
return fn
}
return s.emptyNodeByRoot[n.root]
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.node.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
// Guard the tree root: its parent payload node is nil.
optimistic := false
if n.parent != nil {
optimistic = n.parent.optimistic
}
en := s.emptyNodeByRoot[n.root]
timestamp := en.timestamp
fn := s.fullNodeByRoot[n.root]
if fn != nil {
optimistic = fn.optimistic
timestamp = fn.timestamp
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: optimistic,
ExecutionBlockHash: n.payloadHash[:],
Timestamp: timestamp,
Target: target[:],
}
if optimistic {
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)
var err error
children := s.allConsensusChildren(n)
for _, child := range children {
nodes, err = s.nodeTreeDump(ctx, child, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}
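
Since every parent pointer crosses layers (n.parent is a *PayloadNode, and p.node is its *Node), ancestry walks in this file hop through both. A hedged sketch of the walk performed by fullParent and latestHashForRoot above, written against the simplified types from the first sketch rather than the real Store:

// latestFullPayloadHash walks up from a consensus node to the nearest
// ancestor whose payload was revealed and returns that payload hash, or
// the zero hash if the walk reaches the tree root first.
func latestFullPayloadHash(n *Node) [32]byte {
	for p := n.parent; p != nil; p = p.node.parent {
		if p.full {
			return p.node.payloadHash
		}
	}
	return [32]byte{} // no full ancestor below the tree root
}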

View File

@@ -1,95 +1,17 @@
package doublylinkedtree
import (
"bytes"
"context"
"time"
"github.com/OffchainLabs/prysm/v7/config/params"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessAttestationsThreshold is the amount of time after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10 * time.Second
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
return err
}
childrenWeight += child.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// updateBestDescendant updates the best descendant of this node and its
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
return nil
}
var bestChild *Node
bestWeight := uint64(0)
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
hasViableDescendant = true
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if hasViableDescendant {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
} else {
n.bestDescendant = nil
}
return nil
}
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
@@ -110,30 +32,13 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !n.optimistic {
return nil
}
n.optimistic = false
if n.parent == nil {
return nil
}
return n.parent.setNodeAndParentValidated(ctx)
}
// arrivedEarly returns whether this node was inserted before the first
// threshold to orphan a block.
// Note that genesisTime has seconds granularity, therefore we use a strict
// inequality < here. For example a block that arrives 3.9999 seconds into the
// slot will have secs = 3 below.
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
return sss < votingWindow, err
}
@@ -143,52 +48,7 @@ func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
// Note that genesisTime has seconds granularity, therefore we use an
// inequality >= here. For example a block that arrives 10.00001 seconds into the
// slot will have secs = 10 below.
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
return secs >= ProcessAttestationsThreshold, err
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: n.optimistic,
ExecutionBlockHash: n.payloadHash[:],
Timestamp: n.timestamp,
Target: target[:],
}
if n.optimistic {
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)
var err error
for _, child := range n.children {
nodes, err = child.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}

View File

@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
s.emptyNodeByRoot[indexToHash(1)].weight = 400
s.emptyNodeByRoot[indexToHash(2)].weight = 400
s.emptyNodeByRoot[indexToHash(3)].weight = 400
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
@@ -108,8 +108,8 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
s.emptyNodeByRoot[indexToHash(1)].weight = 100
s.emptyNodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
@@ -128,8 +128,8 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
s.emptyNodeByRoot[indexToHash(1)].weight = 200
s.emptyNodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
@@ -176,9 +176,9 @@ func TestNode_LeadsToViableHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
}
func TestNode_SetFullyValidated(t *testing.T) {
@@ -195,25 +195,25 @@ func TestNode_SetFullyValidated(t *testing.T) {
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
storeNodes[1] = f.store.emptyNodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
storeNodes[2] = f.store.emptyNodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
storeNodes[3] = f.store.emptyNodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
storeNodes[4] = f.store.emptyNodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
storeNodes[5] = f.store.emptyNodeByRoot[blk.Root()]
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -223,7 +223,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
require.NoError(t, f.store.emptyNodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))

View File

@@ -7,92 +7,130 @@ import (
"github.com/pkg/errors"
)
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
node, ok := s.nodeByRoot[root]
if !ok {
node, ok = s.nodeByRoot[parentRoot]
if !ok || node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
n := s.fullNodeByRoot[root]
if n == nil {
// The offending node with its payload is not in forkchoice. Try with the parent
n = s.emptyNodeByRoot[parentRoot]
if n == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
}
// return early if the parent is LVH
if node.payloadHash == lastValidHash {
if n.node.payloadHash == lastValidHash {
// The parent node must have been full and with a valid payload
return invalidRoots, nil
}
} else {
if node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if node.parent.root != parentRoot {
return invalidRoots, errInvalidParentRoot
if n.node.payloadHash == parentHash {
// The parent was full and invalid
n = s.fullNodeByRoot[parentRoot]
if n == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
}
} else {
// The parent is empty and we don't yet know if it's valid or not
for n = n.node.parent; n != nil; n = n.node.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
if n.node.payloadHash == lastValidHash {
// The node built on empty and the whole chain was valid
return invalidRoots, nil
}
if n.node.payloadHash == parentHash {
// The parent was full and invalid
break
}
}
if n == nil {
return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
}
}
}
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
// n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
fp := s.fullParent(n)
for ; fp != nil && fp.node.payloadHash != lastValidHash; fp = s.fullParent(fp) {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
n = fp
}
// Deal with the case that the last valid payload is in a different fork
// This means we are dealing with an EE that does not follow the spec
if firstInvalid.parent == nil {
if fp == nil {
// return early if the invalid node was not imported
if node.root == parentRoot {
if n.node.root == parentRoot {
return invalidRoots, nil
}
firstInvalid = node
}
return s.removeNode(ctx, firstInvalid)
return s.removeNode(ctx, n)
}
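The walk above relies on a fullParent helper that is not included in this diff. A minimal sketch of what it presumably does, following consensus-parent links upward until it finds an ancestor whose payload is present (using the full flag and parent pointers defined in types.go further down):
// fullParent returns the nearest ancestor PayloadNode of pn that carries a
// full payload, or nil if the chain has none. Sketch only; the actual helper
// is not part of this diff and may differ.
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
for p := pn.node.parent; p != nil; p = p.node.parent {
if p.full {
return p
}
}
return nil
}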
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
if node == nil {
if pn == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
}
if !node.optimistic || node.parent == nil {
if !pn.optimistic || pn.node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := node.parent.children
children := pn.node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
pn.node.parent.children = []*Node{}
} else {
for i, n := range children {
if n == node {
if n == pn.node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-1]
pn.node.parent.children = children[:len(children)-1]
break
}
}
}
return s.removeNodeAndChildren(ctx, node, invalidRoots)
return s.removeNodeAndChildren(ctx, pn, invalidRoots)
}
// removeNodeAndChildren removes `pn` and all of its descendants from the Store
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
var err error
for _, child := range node.children {
// If we are removing an empty node, then remove the full node as well if it exists.
if !pn.full {
fn, ok := s.fullNodeByRoot[pn.node.root]
if ok {
invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
if err != nil {
return invalidRoots, err
}
}
}
// Now remove this node's children recursively.
for _, child := range pn.children {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
// We need to remove only the empty node here since the recursion will take care of the full one.
en := s.emptyNodeByRoot[child.root]
if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
return invalidRoots, err
}
}
invalidRoots = append(invalidRoots, node.root)
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
// Only append the root when removing the empty node, so each invalidated root is reported once.
if pn.full {
delete(s.fullNodeByRoot, pn.node.root)
} else {
invalidRoots = append(invalidRoots, pn.node.root)
if pn.node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if pn.node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.emptyNodeByRoot, pn.node.root)
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
return invalidRoots, nil
}
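To make the reporting rule concrete: removing an empty node deletes both payload variants of the root and of every descendant, but reports each consensus root once, while removing only a full node reports nothing. A sketch in the style of the surrounding tests, assuming the package helpers setup, prepareForkchoiceState and indexToHash behave as in the test files shown here, and that pre-Gloas inserts populate both maps:
func TestRemoveNode_ReportsEachRootOnce(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
st, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), indexToHash(102), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
// Pre-Gloas inserts created both payload variants for root 2.
en := f.store.emptyNodeByRoot[indexToHash(2)]
invalidRoots, err := f.store.removeNode(ctx, en)
require.NoError(t, err)
// The root is reported exactly once and both maps drop it.
require.Equal(t, 1, len(invalidRoots))
require.Equal(t, indexToHash(2), invalidRoots[0])
_, ok := f.store.fullNodeByRoot[indexToHash(2)]
require.Equal(t, false, ok)
}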

View File

@@ -274,7 +274,7 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
require.Equal(t, 2, len(f.store.emptyNodeByRoot[[32]byte{'a'}].children))
}

View File

@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
s := f.store
proposerScore := uint64(0)
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
} else {
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
} else {

View File

@@ -166,13 +166,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// (1: 48) -> (2: 38) -> (3: 10)
// \--------------->(4: 18)
//
node1 := f.store.nodeByRoot[indexToHash(1)]
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(48))
node2 := f.store.nodeByRoot[indexToHash(2)]
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(38))
node3 := f.store.nodeByRoot[indexToHash(3)]
node3 := f.store.emptyNodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
node4 := f.store.emptyNodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(18))
// Regression: process attestations for C, check that it

View File

@@ -34,22 +34,23 @@ const orphanLateBlockProposingEarly = 2
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
override = false
// We only need to override FCU if our current head is from the current
// We only need to override FCU if our current consensusHead is from the current
// slot. This differs from the spec implementation in that we assume
// that we will call this function in the previous slot to proposing.
head := f.store.headNode
if head == nil {
consensusHead := f.store.headNode
if consensusHead == nil {
return
}
if head.slot != slots.CurrentSlot(f.store.genesisTime) {
if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
return
}
// Do not reorg on epoch boundaries
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return
}
head := f.store.choosePayloadContent(consensusHead)
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
@@ -61,15 +62,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return
}
// Only orphan a single block
parent := head.parent
parent := consensusHead.parent
if parent == nil {
return
}
if head.slot > parent.slot+1 {
if consensusHead.slot > parent.node.slot+1 {
return
}
// Do not orphan a block that has higher justification than the parent
@@ -78,12 +79,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// }
// Only orphan a block if the head LMD vote is weak
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return
}
// Return early if we are checking before 10 seconds into the slot
sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check current slot")
return true
@@ -106,60 +107,61 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
head := f.store.headNode
if head == nil {
consensusHead := f.store.headNode
if consensusHead == nil {
return [32]byte{}
}
// Only reorg blocks from the previous slot.
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if head.slot+1 != currentSlot {
return head.root
if consensusHead.slot+1 != currentSlot {
return consensusHead.root
}
// Do not reorg on epoch boundaries
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return head.root
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return consensusHead.root
}
// Only reorg blocks that arrive late
head := f.store.choosePayloadContent(consensusHead)
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("could not check if block arrived early")
return head.root
return consensusHead.root
}
if early {
return head.root
return consensusHead.root
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return head.root
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return consensusHead.root
}
// Only orphan a single block
parent := head.parent
parent := consensusHead.parent
if parent == nil {
return head.root
return consensusHead.root
}
if head.slot > parent.slot+1 {
return head.root
if consensusHead.slot > parent.node.slot+1 {
return consensusHead.root
}
// Only orphan a block if the head LMD vote is weak
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return head.root
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return consensusHead.root
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return head.root
return consensusHead.root
}
// Only reorg if we are proposing early
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check if proposing early")
return head.root
return consensusHead.root
}
if sss >= orphanLateBlockProposingEarly*time.Second {
return head.root
return consensusHead.root
}
return parent.root
return parent.node.root
}
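Both functions above map the consensus head to one of its payload variants via choosePayloadContent before the timing checks; the helper is not shown in this diff. A minimal sketch, assuming the full variant is preferred whenever it exists:
// choosePayloadContent picks the payload variant of a consensus node to use
// for head checks. Sketch only, assuming the full node always wins when present.
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
if fn := s.fullNodeByRoot[n.root]; fn != nil {
return fn
}
return s.emptyNodeByRoot[n.root]
}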

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// head starts from justified root and then follows the best descendant links
@@ -26,13 +27,16 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
var jn *Node
ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
if ej != nil {
jn = ej.node
} else {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
justifiedNode = s.treeRootNode
jn = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
@@ -40,9 +44,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// If the justified node doesn't have a best descendant,
// the best node is itself.
bestDescendant := justifiedNode.bestDescendant
bestDescendant := jn.bestDescendant
if bestDescendant == nil {
bestDescendant = justifiedNode
bestDescendant = jn
}
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
@@ -66,29 +70,42 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
roblock consensus_blocks.ROBlock,
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
justifiedEpoch, finalizedEpoch primitives.Epoch,
) (*PayloadNode, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
root := roblock.Root()
block := roblock.Block()
slot := block.Slot()
parentRoot := block.ParentRoot()
var payloadHash [32]byte
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}
// Return if the block has been inserted into Store before.
if n, ok := s.nodeByRoot[root]; ok {
if n, ok := s.emptyNodeByRoot[root]; ok {
return n, nil
}
parent := s.nodeByRoot[parentRoot]
block := roblock.Block()
slot := block.Slot()
var parent *PayloadNode
var payloadHash = &[32]byte{}
if block.Version() >= version.Gloas {
if err := s.getNodeInformation(block, &parent, payloadHash); err != nil {
return nil, err
}
} else {
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}
parentRoot := block.ParentRoot()
en := s.emptyNodeByRoot[parentRoot]
parent = s.fullNodeByRoot[parentRoot]
if parent == nil && en != nil {
// Pre-Gloas, only full parents are allowed: the parent root is known but has no full payload node.
return nil, errInvalidParentRoot
}
}
n := &Node{
slot: slot,
root: root,
@@ -97,30 +114,47 @@ func (s *Store) insert(ctx context.Context,
unrealizedJustifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
unrealizedFinalizedEpoch: finalizedEpoch,
optimistic: true,
payloadHash: payloadHash,
timestamp: time.Now(),
payloadHash: *payloadHash,
}
// Set the node's target checkpoint
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
n.target = n
} else if parent != nil {
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
n.target = parent.target
if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
n.target = parent.node.target
} else {
n.target = parent
n.target = parent.node
}
}
var ret *PayloadNode
// Make the empty node. Its optimistic status equals its parent's status,
// defaulting to optimistic when there is no parent (the tree root).
parentOptimistic := true
if parent != nil {
parentOptimistic = parent.optimistic
}
pn := &PayloadNode{
node: n,
optimistic: parentOptimistic,
timestamp: time.Now(),
}
s.emptyNodeByRoot[root] = pn
ret = pn
if block.Version() < version.Gloas {
// Also make the full node; it stays optimistic until the engine validates the execution payload.
fn := &PayloadNode{
node: n,
optimistic: true,
timestamp: time.Now(),
full: true,
}
ret = fn
s.fullNodeByRoot[root] = fn
}
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
s.highestReceivedNode = n
} else {
delete(s.nodeByRoot, root)
delete(s.emptyNodeByRoot, root)
delete(s.fullNodeByRoot, root)
return nil, errInvalidParentRoot
}
} else {
@@ -128,7 +162,7 @@ func (s *Store) insert(ctx context.Context,
// Apply proposer boost
now := time.Now()
if now.Before(s.genesisTime) {
return n, nil
return ret, nil
}
currentSlot := slots.CurrentSlot(s.genesisTime)
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
@@ -144,17 +178,16 @@ func (s *Store) insert(ctx context.Context,
// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
_, remErr := s.removeNode(ctx, n)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")
}
return nil, errors.Wrap(err, "could not update best descendants")
if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
log.WithError(err).WithFields(logrus.Fields{
"slot": slot,
"root": root,
}).Error("Could not update best descendant")
}
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
// Only update received block slot if it's within epoch from current time.
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
@@ -165,10 +198,10 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
}
return n, nil
return ret, nil
}
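The Gloas branch above delegates to getNodeInformation, which is not part of this hunk. A plausible sketch of its job, assuming the Gloas block body exposes its signed execution payload bid and that the bid's parent block hash decides whether the block built on the full or the empty variant of its parent (the bid accessor and field names below are assumptions, not the actual API; interfaces and bytesutil are the imports used elsewhere in the package):
// getNodeInformation resolves the parent PayloadNode and the payload hash a
// Gloas block commits to. Sketch only: SignedExecutionPayloadBid and the bid
// field names are assumptions about the block body API.
func (s *Store) getNodeInformation(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, payloadHash *[32]byte) error {
bid, err := block.Body().SignedExecutionPayloadBid() // assumed accessor
if err != nil {
return err
}
copy(payloadHash[:], bid.Message.BlockHash)
parentRoot := block.ParentRoot()
fn := s.fullNodeByRoot[parentRoot]
// If the bid extends the parent's payload hash, the block built on the full
// variant; otherwise it built on the empty one.
if fn != nil && fn.node.payloadHash == bytesutil.ToBytes32(bid.Message.ParentBlockHash) {
*parent = fn
} else {
*parent = s.emptyNodeByRoot[parentRoot]
}
return nil
}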
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
// pruneFinalizedNodeByRootMap prunes the `emptyNodeByRoot` and `fullNodeByRoot` maps
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
@@ -181,44 +214,51 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
}
return nil
}
for _, child := range node.children {
for _, child := range s.allConsensusChildren(node) {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return err
}
}
node.children = nil
delete(s.nodeByRoot, node.root)
en := s.emptyNodeByRoot[node.root]
en.children = nil
delete(s.emptyNodeByRoot, node.root)
fn := s.fullNodeByRoot[node.root]
if fn != nil {
fn.children = nil
delete(s.fullNodeByRoot, node.root)
}
return nil
}
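This function, and tips() further down, iterate allConsensusChildren, another helper not shown in this diff; presumably it merges the child lists attached to both payload variants of a consensus node. A minimal sketch under that assumption:
// allConsensusChildren returns the consensus-level children of n by merging
// the children recorded on its empty and full payload nodes. Sketch only.
func (s *Store) allConsensusChildren(n *Node) []*Node {
children := make([]*Node, 0)
if en := s.emptyNodeByRoot[n.root]; en != nil {
children = append(children, en.children...)
}
if fn := s.fullNodeByRoot[n.root]; fn != nil {
children = append(children, fn.children...)
}
return children
}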
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
// TODO (GLOAS): to ensure that chains up to a full node can still be found, consider pruning only up to the latest finalized block whose payload was full
func (s *Store) prune(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
finalizedRoot := s.finalizedCheckpoint.Root
finalizedEpoch := s.finalizedCheckpoint.Epoch
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
fen, ok := s.emptyNodeByRoot[finalizedRoot]
if !ok || fen == nil {
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
}
fn := fen.node
// return early if we haven't changed the finalized checkpoint
if finalizedNode.parent == nil {
if fn.parent == nil {
return nil
}
// Save the new finalized dependent root because it will be pruned
s.finalizedDependentRoot = finalizedNode.parent.root
s.finalizedDependentRoot = fn.parent.node.root
// Prune nodeByRoot starting from root
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
return err
}
finalizedNode.parent = nil
s.treeRootNode = finalizedNode
fn.parent = nil
s.treeRootNode = fn
prunedCount.Inc()
// Prune all children of the finalized checkpoint block that are incompatible with it
@@ -226,13 +266,13 @@ func (s *Store) prune(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not compute epoch start")
}
if finalizedNode.slot == checkpointMaxSlot {
if fn.slot == checkpointMaxSlot {
return nil
}
for _, child := range finalizedNode.children {
for _, child := range fen.children {
if child != nil && child.slot <= checkpointMaxSlot {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
return errors.Wrap(err, "could not prune incompatible finalized child")
}
}
@@ -246,10 +286,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
var roots [][32]byte
var slots []primitives.Slot
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
for root, n := range s.emptyNodeByRoot {
if len(s.allConsensusChildren(n.node)) == 0 {
roots = append(roots, root)
slots = append(slots, node.slot)
slots = append(slots, n.node.slot)
}
}
return roots, slots

View File

@@ -52,7 +52,7 @@ func TestStore_NodeByRoot(t *testing.T) {
}
require.Equal(t, 3, f.NodeCount())
for root, node := range f.store.nodeByRoot {
for root, node := range f.store.emptyNodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
@@ -130,14 +130,14 @@ func TestStore_Insert(t *testing.T) {
nodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): {node: treeRootNode, full: true}}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
s := &Store{emptyNodeByRoot: map[[32]byte]*PayloadNode{indexToHash(0): {node: treeRootNode}}, fullNodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
@@ -166,7 +166,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Finalized root is at index 99 so everything before 99 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
@@ -188,12 +188,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Finalized root is at index 11 so everything before 11 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(10)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
// One more time.
s.finalizedCheckpoint.Root = indexToHash(20)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_ReturnEarly(t *testing.T) {
@@ -236,7 +236,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.emptyNodeByRoot), 1)
}
// This test starts with the following branching diagram
@@ -316,7 +316,7 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.emptyNodeByRoot), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {

View File

@@ -21,23 +21,26 @@ type ForkChoice struct {
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}
var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // empty payload nodes indexed by block root.
fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // full payload nodes indexed by block root.
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time
highestReceivedNode *Node // The highest slot node.
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
@@ -50,18 +53,27 @@ type Node struct {
slot primitives.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
parent *Node // parent index of this node.
parent *PayloadNode // parent index of this node.
target *Node // target checkpoint for
children []*Node // the list of direct children of this Node
bestDescendant *Node // bestDescendant node of this node.
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp time.Time // The timestamp when the node was inserted.
}
// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty or full
type PayloadNode struct {
optimistic bool // whether the block has been fully validated or not
full bool // whether this node represents a payload present or not
weight uint64 // weight of this node: the total balance including children
balance uint64 // the balance that voted for this node directly
bestDescendant *Node // bestDescendant node of this payload node.
node *Node // the consensus part of this full forkchoice node
timestamp time.Time // The timestamp when the node was inserted.
children []*Node // the list of direct children of this Node
}
// Vote defines an individual validator's vote.

View File

@@ -15,33 +15,34 @@ import (
)
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
}
if epoch < node.unrealizedJustifiedEpoch {
if epoch < en.node.unrealizedJustifiedEpoch {
return errInvalidUnrealizedJustifiedEpoch
}
node.unrealizedJustifiedEpoch = epoch
en.node.unrealizedJustifiedEpoch = epoch
return nil
}
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
}
if epoch < node.unrealizedFinalizedEpoch {
if epoch < en.node.unrealizedFinalizedEpoch {
return errInvalidUnrealizedFinalizedEpoch
}
node.unrealizedFinalizedEpoch = epoch
en.node.unrealizedFinalizedEpoch = epoch
return nil
}
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
for _, node := range f.store.nodeByRoot {
for _, en := range f.store.emptyNodeByRoot {
node := en.node
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
@@ -62,16 +63,17 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
if node.parent == nil { // Nothing to do if the parent is nil.
return jc, fc
}
pn := node.parent.node
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
stateSlot := state.Slot()
stateEpoch := slots.ToEpoch(stateSlot)
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
// Exit early if it's justified or too early to be justified.
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
return jc, fc
}

View File

@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
@@ -78,7 +78,7 @@ func TestStore_LongFork(t *testing.T) {
// Add an attestation to c, it is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
f.justifiedBalances = []uint64{100}
c := f.store.nodeByRoot[[32]byte{'c'}]
c := f.store.emptyNodeByRoot[[32]byte{'c'}]
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.node.slot))
driftGenesisTime(f, c.node.slot, 0)
headRoot, err := f.Head(ctx)
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
d := f.store.nodeByRoot[[32]byte{'d'}]
d := f.store.emptyNodeByRoot[[32]byte{'d'}]
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.node.slot))
driftGenesisTime(f, d.node.slot, 0)
require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.node.slot)))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
}
// Epoch 1 Epoch 2 Epoch 3
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
driftGenesisTime(f, 99, 0)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
}
func TestStore_PullTips_Heuristics(t *testing.T) {
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 66, 0)
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 127, 0)
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
})
t.Run("Block from previous Epoch", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
})
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
f := setup(1, 1)
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
})
}

View File

@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(t.Context()))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
// we pruned artificially the justified root.
f.store.justifiedCheckpoint.Root = indexToHash(5)

View File

@@ -89,7 +89,7 @@ type FastGetter interface {
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(time.Time)

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"handlers.go",
"handlers_block.go",
"handlers_gloas.go",
"log.go",
"server.go",
],

View File

@@ -1,276 +0,0 @@
package validator
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/network/httputil"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"google.golang.org/protobuf/types/known/wrapperspb"
)
// ProduceBlockV4 requests a beacon node to produce a valid GLOAS block.
// This is the GLOAS-specific block production endpoint that returns a block
// containing a signed execution payload bid instead of the full payload.
//
// The execution payload envelope is cached by the beacon node and can be
// retrieved via GetExecutionPayloadEnvelope.
//
// Endpoint: GET /eth/v4/validator/blocks/{slot}
func (s *Server) ProduceBlockV4(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "validator.ProduceBlockV4")
defer span.End()
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
// Parse path parameters
segments := strings.Split(r.URL.Path, "/")
rawSlot := segments[len(segments)-1]
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
if !valid {
return
}
// Parse query parameters
rawRandaoReveal := r.URL.Query().Get("randao_reveal")
rawGraffiti := r.URL.Query().Get("graffiti")
rawSkipRandaoVerification := r.URL.Query().Get("skip_randao_verification")
var bbFactor *wrapperspb.UInt64Value
rawBbFactor, bbValue, ok := shared.UintFromQuery(w, r, "builder_boost_factor", false)
if !ok {
return
}
if rawBbFactor != "" {
bbFactor = &wrapperspb.UInt64Value{Value: bbValue}
}
// Parse randao reveal
var randaoReveal []byte
if rawSkipRandaoVerification == "true" {
// TODO: Use infinite signature constant
randaoReveal = make([]byte, 96)
} else {
// TODO: Decode randao reveal from hex
_ = rawRandaoReveal
}
// Parse graffiti
var graffiti []byte
if rawGraffiti != "" {
// TODO: Decode graffiti from hex
}
// TODO: Implement GLOAS-specific block production
//
// This handler should:
// 1. Verify the slot is in the GLOAS fork
// 2. Call v1alpha1 server's getGloasBeaconBlock
// 3. Format response with GLOAS-specific headers
// 4. Return the block (the envelope is cached server-side)
_ = bbFactor
_ = graffiti
_ = randaoReveal
_ = slot
httputil.HandleError(w, "ProduceBlockV4 not yet implemented", http.StatusNotImplemented)
}
// handleProduceGloasV4 handles the response formatting for GLOAS blocks.
func handleProduceGloasV4(w http.ResponseWriter, isSSZ bool, block *eth.BeaconBlockGloas, payloadValue, consensusBlockValue string) {
// TODO: Implement GLOAS response handling
//
// Similar to handleProduceFuluV3 but for GLOAS blocks.
// The response should NOT include the execution payload envelope,
// as that is retrieved separately.
if isSSZ {
// TODO: SSZ serialize the GLOAS block
httputil.HandleError(w, "SSZ response not yet implemented for GLOAS", http.StatusNotImplemented)
return
}
// JSON response
// TODO: Convert GLOAS block to JSON struct
resp := &structs.ProduceBlockV3Response{
Version: version.String(version.Gloas),
ExecutionPayloadBlinded: false, // GLOAS blocks don't have blinded concept in same way
ExecutionPayloadValue: payloadValue,
ConsensusBlockValue: consensusBlockValue,
Data: nil, // TODO: Marshal block to JSON
}
httputil.WriteJson(w, resp)
}
// GetExecutionPayloadEnvelope retrieves a cached execution payload envelope.
// Validators call this after receiving a GLOAS block to get the envelope
// they need to sign and broadcast.
//
// Endpoint: GET /eth/v1/validator/execution_payload_envelope/{slot}/{builder_index}
func (s *Server) GetExecutionPayloadEnvelope(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.ExecutionPayloadEnvelope")
defer span.End()
// Parse path parameters
segments := strings.Split(r.URL.Path, "/")
if len(segments) < 2 {
httputil.HandleError(w, "missing slot and builder_index in path", http.StatusBadRequest)
return
}
rawSlot := segments[len(segments)-2]
rawBuilderIndex := segments[len(segments)-1]
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
if !valid {
return
}
builderIndex, err := strconv.ParseUint(rawBuilderIndex, 10, 64)
if err != nil {
httputil.HandleError(w, errors.Wrap(err, "invalid builder_index").Error(), http.StatusBadRequest)
return
}
// Build gRPC request
req := &eth.ExecutionPayloadEnvelopeRequest{
Slot: primitives.Slot(slot),
BuilderIndex: primitives.BuilderIndex(builderIndex),
}
// TODO: The V1Alpha1Server needs to implement the ExecutionPayloadEnvelope method
// from the BeaconNodeValidatorServer interface. Currently it's defined but the
// interface may need updating to include this method.
//
// Once implemented, uncomment:
// resp, err := s.V1Alpha1Server.ExecutionPayloadEnvelope(ctx, req)
// if err != nil {
// // Map gRPC error codes to HTTP status codes
// if status.Code(err) == codes.NotFound {
// httputil.HandleError(w, err.Error(), http.StatusNotFound)
// } else {
// httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
// }
// return
// }
//
// // Format and return response
// // - Support both JSON and SSZ based on Accept header
// // - Set version header
// w.Header().Set(api.VersionHeader, version.String(version.Gloas))
// httputil.WriteJson(w, &structs.GetExecutionPayloadEnvelopeResponse{
// Version: version.String(version.Gloas),
// Data: envelopeProtoToJSON(resp.Envelope),
// })
_ = ctx
_ = req
httputil.HandleError(w, "ExecutionPayloadEnvelope not yet implemented", http.StatusNotImplemented)
}
// PublishExecutionPayloadEnvelope broadcasts a signed execution payload envelope.
// Validators call this after signing the envelope to broadcast it to the network.
//
// Endpoint: POST /eth/v1/beacon/execution_payload_envelope
func (s *Server) PublishExecutionPayloadEnvelope(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.PublishExecutionPayloadEnvelope")
defer span.End()
// Parse request body
var signedEnvelope structs.SignedExecutionPayloadEnvelope
if err := json.NewDecoder(r.Body).Decode(&signedEnvelope); err != nil {
httputil.HandleError(w, errors.Wrap(err, "failed to decode request body").Error(), http.StatusBadRequest)
return
}
// TODO: Convert JSON struct to proto
// protoEnvelope, err := signedEnvelope.ToProto()
// if err != nil {
// httputil.HandleError(w, err.Error(), http.StatusBadRequest)
// return
// }
// TODO: Call gRPC server
// _, err = s.V1Alpha1Server.PublishExecutionPayloadEnvelope(ctx, protoEnvelope)
// if err != nil {
// // Handle different error types (validation errors vs internal errors)
// httputil.HandleError(w, err.Error(), http.StatusBadRequest)
// return
// }
_ = ctx
_ = signedEnvelope
httputil.HandleError(w, "PublishExecutionPayloadEnvelope not yet implemented", http.StatusNotImplemented)
}
// ExecutionPayloadEnvelopeJSON represents the JSON structure for an execution payload envelope.
// This is used for REST API serialization.
type ExecutionPayloadEnvelopeJSON struct {
Payload json.RawMessage `json:"payload"`
ExecutionRequests json.RawMessage `json:"execution_requests"`
BuilderIndex string `json:"builder_index"`
BeaconBlockRoot string `json:"beacon_block_root"`
Slot string `json:"slot"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
StateRoot string `json:"state_root"`
}
// SignedExecutionPayloadEnvelopeJSON represents the JSON structure for a signed envelope.
type SignedExecutionPayloadEnvelopeJSON struct {
Message *ExecutionPayloadEnvelopeJSON `json:"message"`
Signature string `json:"signature"`
}
// ExecutionPayloadEnvelopeResponseJSON is the response wrapper for envelope retrieval.
type ExecutionPayloadEnvelopeResponseJSON struct {
Version string `json:"version"`
Data *ExecutionPayloadEnvelopeJSON `json:"data"`
}
// envelopeProtoToJSON converts a proto envelope to JSON representation.
func envelopeProtoToJSON(envelope *eth.ExecutionPayloadEnvelope) (*ExecutionPayloadEnvelopeJSON, error) {
// TODO: Implement conversion
//
// Convert each field:
// - payload: Marshal ExecutionPayloadDeneb to JSON
// - execution_requests: Marshal to JSON
// - builder_index: Convert uint64 to string
// - beacon_block_root: Hex encode
// - slot: Convert uint64 to string
// - blob_kzg_commitments: Hex encode each
// - state_root: Hex encode
return nil, fmt.Errorf("envelopeProtoToJSON not yet implemented")
}
// envelopeJSONToProto converts a JSON envelope to proto representation.
func envelopeJSONToProto(envelope *ExecutionPayloadEnvelopeJSON) (*eth.ExecutionPayloadEnvelope, error) {
// TODO: Implement conversion
//
// Parse each field:
// - payload: Unmarshal from JSON
// - execution_requests: Unmarshal from JSON
// - builder_index: Parse uint64 from string
// - beacon_block_root: Hex decode
// - slot: Parse uint64 from string
// - blob_kzg_commitments: Hex decode each
// - state_root: Hex decode
return nil, fmt.Errorf("envelopeJSONToProto not yet implemented")
}
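// The reverse mapping would mirror the sketch above: protojson.Unmarshal for
// payload and execution_requests, strconv.ParseUint (base 10) for
// builder_index and slot, and hexutil.Decode for beacon_block_root,
// state_root, and each blob KZG commitment.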

View File

@@ -19,24 +19,23 @@ import (
// Server defines a server implementation of the gRPC Validator service,
// providing RPC endpoints intended for validator clients.
type Server struct {
HeadFetcher blockchain.HeadFetcher
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
Stater lookup.Stater
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server eth.BeaconNodeValidatorServer
ChainInfoFetcher blockchain.ChainInfoFetcher
BeaconDB db.HeadAccessDatabase
BlockBuilder builder.BlockBuilder
OperationNotifier operation.Notifier
CoreService *core.Service
BlockRewardFetcher rewards.BlockRewardsFetcher
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
ExecutionPayloadEnvelopeCache *cache.ExecutionPayloadEnvelopeCache // GLOAS: Cache for execution payload envelopes
HeadFetcher blockchain.HeadFetcher
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationCache *cache.AttestationCache
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
Stater lookup.Stater
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server eth.BeaconNodeValidatorServer
ChainInfoFetcher blockchain.ChainInfoFetcher
BeaconDB db.HeadAccessDatabase
BlockBuilder builder.BlockBuilder
OperationNotifier operation.Notifier
CoreService *core.Service
BlockRewardFetcher rewards.BlockRewardsFetcher
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
}

View File

@@ -26,7 +26,6 @@ go_library(
"proposer_eth1data.go",
"proposer_execution_payload.go",
"proposer_exits.go",
"proposer_gloas.go",
"proposer_slashings.go",
"proposer_sync_aggregate.go",
"server.go",

View File

@@ -57,8 +57,6 @@ func (vs *Server) constructGenericBeaconBlock(
return nil, fmt.Errorf("expected *BlobsBundleV2, got %T", blobsBundler)
}
return vs.constructFuluBlock(blockProto, isBlinded, bidStr, bundle), nil
case version.Gloas:
return vs.constructGloasBlock(blockProto), nil
default:
return nil, fmt.Errorf("unknown block version: %d", sBlk.Version())
}
@@ -111,13 +109,6 @@ func (vs *Server) constructElectraBlock(blockProto proto.Message, isBlinded bool
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Electra{Electra: electraContents}, IsBlinded: false, PayloadValue: payloadValue}
}
func (vs *Server) constructGloasBlock(blockProto proto.Message) *ethpb.GenericBeaconBlock {
// GLOAS blocks do not carry a separate payload value — the bid is part of the block body.
return &ethpb.GenericBeaconBlock{
Block: &ethpb.GenericBeaconBlock_Gloas{Gloas: blockProto.(*ethpb.BeaconBlockGloas)},
}
}
func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundleV2) *ethpb.GenericBeaconBlock {
if isBlinded {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blockProto.(*ethpb.BlindedBeaconBlockFulu)}, IsBlinded: true, PayloadValue: payloadValue}

View File

@@ -231,13 +231,6 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
// Set bls to execution change. New in Capella.
vs.setBlsToExecData(sBlk, head)
// Set payload attestations. New in GLOAS.
if sBlk.Version() >= version.Gloas {
if err := sBlk.SetPayloadAttestations(vs.getPayloadAttestations(ctx, head, sBlk.Block().Slot())); err != nil {
log.WithError(err).Error("Could not set payload attestations")
}
}
})
winningBid := primitives.ZeroWei()
@@ -248,31 +241,24 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
return nil, status.Errorf(codes.Internal, "Could not get local payload: %v", err)
}
switch {
case sBlk.Version() >= version.Gloas:
if err := vs.setGloasExecutionData(ctx, sBlk, local); err != nil {
return nil, status.Errorf(codes.Internal, "Could not set GLOAS execution data: %v", err)
}
default:
// There's no reason to try to get a builder bid if local override is true.
var builderBid builderapi.Bid
if !(local.OverrideBuilder || skipMevBoost) {
latestHeader, err := head.LatestExecutionPayloadHeader()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get latest execution payload header: %v", err)
}
parentGasLimit := latestHeader.GasLimit()
builderBid, err = vs.getBuilderPayloadAndBlobs(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex(), parentGasLimit)
if err != nil {
builderGetPayloadMissCount.Inc()
log.WithError(err).Error("Could not get builder payload")
}
}
winningBid, bundle, err = setExecutionData(ctx, sBlk, local, builderBid, builderBoostFactor)
// There's no reason to try to get a builder bid if local override is true.
var builderBid builderapi.Bid
if !(local.OverrideBuilder || skipMevBoost) {
latestHeader, err := head.LatestExecutionPayloadHeader()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
return nil, status.Errorf(codes.Internal, "Could not get latest execution payload header: %v", err)
}
parentGasLimit := latestHeader.GasLimit()
builderBid, err = vs.getBuilderPayloadAndBlobs(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex(), parentGasLimit)
if err != nil {
builderGetPayloadMissCount.Inc()
log.WithError(err).Error("Could not get builder payload")
}
}
winningBid, bundle, err = setExecutionData(ctx, sBlk, local, builderBid, builderBoostFactor)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}
}
@@ -291,6 +277,11 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
//
// ProposeBeaconBlock handles the proposal of beacon blocks.
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
var (
blobSidecars []*ethpb.BlobSidecar
dataColumnSidecars []blocks.RODataColumn
)
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
@@ -307,58 +298,15 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
}
if block.Version() < version.Gloas {
// For post-Fulu blinded blocks, submit to relay and return early.
if block.IsBlinded() && slots.ToEpoch(block.Block().Slot()) >= params.BeaconConfig().FuluForkEpoch {
err := vs.BlockBuilder.SubmitBlindedBlockPostFulu(ctx, block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not submit blinded block post-Fulu: %v", err)
}
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
// For post-Fulu blinded blocks, submit to relay and return early
if block.IsBlinded() && slots.ToEpoch(block.Block().Slot()) >= params.BeaconConfig().FuluForkEpoch {
err := vs.BlockBuilder.SubmitBlindedBlockPostFulu(ctx, block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not submit blinded block post-Fulu: %v", err)
}
return vs.proposeBlockWithSidecars(ctx, block, root, req)
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
return vs.proposeBlock(ctx, block, root)
}
// proposeBlock broadcasts and receives a beacon block without sidecars.
// Used for GLOAS and beyond where execution data is delivered via a separate envelope.
func (vs *Server) proposeBlock(
ctx context.Context,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
) (*ethpb.ProposeResponse, error) {
protoBlock, err := block.Proto()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
}
if err := vs.P2P.Broadcast(ctx, protoBlock); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast block: %v", err)
}
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
})
if err := vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil); err != nil {
return nil, status.Errorf(codes.Internal, "Could not receive block: %v", err)
}
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
// proposeBlockWithSidecars handles block proposal for pre-GLOAS forks
// (Bellatrix through Fulu), where blob or data column sidecars may accompany the block.
func (vs *Server) proposeBlockWithSidecars(
ctx context.Context,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
req *ethpb.GenericSignedBeaconBlock,
) (*ethpb.ProposeResponse, error) {
var (
blobSidecars []*ethpb.BlobSidecar
dataColumnSidecars []blocks.RODataColumn
)
rob, err := blocks.NewROBlockWithRoot(block, root)
if block.IsBlinded() {
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
@@ -462,10 +410,18 @@ func (vs *Server) handleUnblindedBlock(
}
if block.Version() >= version.Fulu {
roDataColumnSidecars, err := buildDataColumnSidecars(rawBlobs, proofs, peerdas.PopulateFromBlock(block))
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
if err != nil {
return nil, nil, err
return nil, nil, errors.Wrap(err, "compute cells and proofs")
}
// Construct data column sidecars from the signed block and cells and proofs.
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
if err != nil {
return nil, nil, errors.Wrap(err, "data column sidecars")
}
return nil, roDataColumnSidecars, nil
}
@@ -477,22 +433,6 @@ func (vs *Server) handleUnblindedBlock(
return blobSidecars, nil, nil
}
// buildDataColumnSidecars computes cells and proofs from blobs and constructs
// data column sidecars using the given ConstructionPopulator source.
func buildDataColumnSidecars(blobs, proofs [][]byte, src peerdas.ConstructionPopulator) ([]blocks.RODataColumn, error) {
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, proofs)
if err != nil {
return nil, errors.Wrap(err, "compute cells and proofs")
}
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, src)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars")
}
return roDataColumnSidecars, nil
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {

View File

@@ -16,11 +16,6 @@ func getEmptyBlock(slot primitives.Slot) (interfaces.SignedBeaconBlock, error) {
var err error
epoch := slots.ToEpoch(slot)
switch {
case epoch >= params.BeaconConfig().GloasForkEpoch:
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{Block: &ethpb.BeaconBlockGloas{Body: &ethpb.BeaconBlockBodyGloas{}}})
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
case epoch >= params.BeaconConfig().FuluForkEpoch:
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockFulu{Block: &ethpb.BeaconBlockElectra{Body: &ethpb.BeaconBlockBodyElectra{}}})
if err != nil {

View File

@@ -1,460 +0,0 @@
package validator
import (
"context"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
blockfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// setGloasExecutionData creates an execution payload bid from the local payload,
// sets it on the block, and caches the execution payload envelope for later
// retrieval by the validator client.
func (vs *Server) setGloasExecutionData(
ctx context.Context,
sBlk interfaces.SignedBeaconBlock,
local *consensusblocks.GetPayloadResponse,
) error {
_, span := trace.StartSpan(ctx, "ProposerServer.setGloasExecutionData")
defer span.End()
if local == nil || local.ExecutionData == nil {
return errors.New("local execution payload is nil")
}
// Create execution payload bid from the local payload.
parentRoot := sBlk.Block().ParentRoot()
bid, err := vs.createSelfBuildExecutionPayloadBid(
local.ExecutionData,
primitives.BuilderIndex(sBlk.Block().ProposerIndex()),
parentRoot[:],
sBlk.Block().Slot(),
local.BlobsBundler,
)
if err != nil {
return errors.Wrap(err, "could not create execution payload bid")
}
// Per spec, self-build bids must use G2 point-at-infinity as the signature.
// Only the execution payload envelope requires a real signature from the proposer.
signedBid := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
if err := sBlk.SetSignedExecutionPayloadBid(signedBid); err != nil {
return errors.Wrap(err, "could not set signed execution payload bid")
}
// Cache the execution payload envelope and blobs bundle for later retrieval.
// The envelope is retrieved by the VC to sign and broadcast.
// The blobs bundle is needed during block proposal to build and broadcast blob sidecars.
envelope := vs.createExecutionPayloadEnvelope(
local.ExecutionData,
local.ExecutionRequests,
primitives.BuilderIndex(sBlk.Block().ProposerIndex()),
sBlk.Block().Slot(),
local.BlobsBundler,
)
vs.cacheExecutionPayloadEnvelope(envelope, local.BlobsBundler)
return nil
}
// getPayloadAttestations returns payload attestations for inclusion in a GLOAS block.
// These attest to the payload timeliness from the previous slot's PTC.
func (vs *Server) getPayloadAttestations(ctx context.Context, head state.BeaconState, slot primitives.Slot) []*ethpb.PayloadAttestation {
// TODO: Implement payload attestation retrieval from pool.
// This requires:
// 1. A PayloadAttestationPool to collect PTC votes
// 2. Aggregation of individual PayloadAttestationMessages into PayloadAttestations
// For now, return empty - blocks are valid without payload attestations.
return []*ethpb.PayloadAttestation{}
}
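// Once a pool exists, aggregation would roughly group the collected
// PayloadAttestationMessages by their PayloadAttestationData, set each PTC
// member's bit in the aggregation bits, and combine the signatures with
// bls.AggregateSignatures. This is a hypothetical flow; no pool API is
// defined yet.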
// createSelfBuildExecutionPayloadBid creates an ExecutionPayloadBid for self-building,
// where the proposer acts as its own builder. Value and payment are zero, and the
// bid fields are derived directly from the local execution payload.
func (vs *Server) createSelfBuildExecutionPayloadBid(
executionData interfaces.ExecutionData,
builderIndex primitives.BuilderIndex,
parentBlockRoot []byte,
slot primitives.Slot,
blobsBundler enginev1.BlobsBundler,
) (*ethpb.ExecutionPayloadBid, error) {
if executionData == nil || executionData.IsNil() {
return nil, errors.New("execution data is nil")
}
// Compute blob_kzg_commitments_root from the blobs bundle.
// This is hash_tree_root(List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]).
kzgCommitmentsRoot := make([]byte, 32)
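// Note: the all-zero root above is the no-blobs default. If the spec expects
// hash_tree_root of an empty commitment list here (which is not the zero
// root), this default may need to be computed explicitly.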
if blobsBundler != nil {
commitments := extractKzgCommitments(blobsBundler)
if len(commitments) > 0 {
leaves := consensusblocks.LeavesFromCommitments(commitments)
commitmentsTree, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
if err != nil {
return nil, errors.Wrap(err, "could not generate kzg commitments trie")
}
root, err := commitmentsTree.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not compute kzg commitments root")
}
kzgCommitmentsRoot = root[:]
}
}
return &ethpb.ExecutionPayloadBid{
ParentBlockHash: executionData.ParentHash(),
ParentBlockRoot: bytesutil.SafeCopyBytes(parentBlockRoot),
BlockHash: executionData.BlockHash(),
PrevRandao: executionData.PrevRandao(),
FeeRecipient: executionData.FeeRecipient(),
GasLimit: executionData.GasLimit(),
BuilderIndex: builderIndex,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: kzgCommitmentsRoot,
}, nil
}
// createExecutionPayloadEnvelope wraps a full execution payload with metadata.
// The envelope is cached by the beacon node during block production for later
// retrieval by the validator via GetExecutionPayloadEnvelope.
func (vs *Server) createExecutionPayloadEnvelope(
executionData interfaces.ExecutionData,
executionRequests *enginev1.ExecutionRequests,
builderIndex primitives.BuilderIndex,
slot primitives.Slot,
blobsBundler enginev1.BlobsBundler,
) *ethpb.ExecutionPayloadEnvelope {
// Extract the underlying ExecutionPayloadDeneb proto
var payload *enginev1.ExecutionPayloadDeneb
if executionData != nil && !executionData.IsNil() {
if p, ok := executionData.Proto().(*enginev1.ExecutionPayloadDeneb); ok {
payload = p
}
}
commitments := extractKzgCommitments(blobsBundler)
return &ethpb.ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: executionRequests,
BuilderIndex: builderIndex,
BeaconBlockRoot: make([]byte, 32), // Populated later when block root is known
Slot: slot,
BlobKzgCommitments: commitments,
StateRoot: make([]byte, 32), // Computed later in GetExecutionPayloadEnvelope
}
}
// extractKzgCommitments pulls KZG commitments from a blobs bundler.
func extractKzgCommitments(blobsBundler enginev1.BlobsBundler) [][]byte {
if blobsBundler == nil {
return nil
}
switch b := blobsBundler.(type) {
case *enginev1.BlobsBundle:
if b != nil {
return b.KzgCommitments
}
case *enginev1.BlobsBundleV2:
if b != nil {
return b.KzgCommitments
}
}
return nil
}
// cacheExecutionPayloadEnvelope stores an envelope and its blobs bundle for later retrieval.
// The blobs bundle is cached alongside the envelope because blobs from the EL are only
// held in memory until they are broadcast as sidecars during block proposal.
func (vs *Server) cacheExecutionPayloadEnvelope(envelope *ethpb.ExecutionPayloadEnvelope, blobsBundle enginev1.BlobsBundler) {
if vs.ExecutionPayloadEnvelopeCache == nil {
log.Warn("ExecutionPayloadEnvelopeCache is nil, envelope will not be cached")
return
}
vs.ExecutionPayloadEnvelopeCache.Set(envelope, blobsBundle)
}
// GetExecutionPayloadEnvelope retrieves a cached execution payload envelope.
// This is called by validators after receiving a GLOAS block to get the envelope
// they need to sign and broadcast.
//
// gRPC endpoint: /eth/v1alpha1/validator/execution_payload_envelope/{slot}/{builder_index}
func (vs *Server) GetExecutionPayloadEnvelope(
ctx context.Context,
req *ethpb.ExecutionPayloadEnvelopeRequest,
) (*ethpb.ExecutionPayloadEnvelopeResponse, error) {
if req == nil {
return nil, status.Error(codes.InvalidArgument, "request cannot be nil")
}
if slots.ToEpoch(req.Slot) < params.BeaconConfig().GloasForkEpoch {
return nil, status.Errorf(codes.InvalidArgument,
"execution payload envelopes are not supported before GLOAS fork (slot %d)", req.Slot)
}
if vs.ExecutionPayloadEnvelopeCache == nil {
return nil, status.Error(codes.Internal, "execution payload envelope cache not initialized")
}
envelope, found := vs.ExecutionPayloadEnvelopeCache.Get(req.Slot, req.BuilderIndex)
if !found {
return nil, status.Errorf(
codes.NotFound,
"execution payload envelope not found for slot %d builder %d",
req.Slot,
req.BuilderIndex,
)
}
// Compute state root if not already set.
// Following the pattern from epbs-interop: compute post-payload state root.
if len(envelope.StateRoot) == 0 || bytesutil.ZeroRoot(envelope.StateRoot) {
stateRoot, err := vs.computePostPayloadStateRoot(ctx, envelope)
if err != nil {
log.WithError(err).Warn("Failed to compute post-payload state root")
} else {
envelope.StateRoot = stateRoot
log.WithField("stateRoot", fmt.Sprintf("%#x", stateRoot)).Debug("Computed state root at execution stage")
}
}
return &ethpb.ExecutionPayloadEnvelopeResponse{
Envelope: envelope,
}, nil
}
// computePostPayloadStateRoot computes the state root after an execution
// payload envelope has been processed through a state transition.
func (vs *Server) computePostPayloadStateRoot(ctx context.Context, envelope *ethpb.ExecutionPayloadEnvelope) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.computePostPayloadStateRoot")
defer span.End()
if len(envelope.BeaconBlockRoot) == 0 || bytesutil.ZeroRoot(envelope.BeaconBlockRoot) {
return nil, errors.New("beacon block root not set on envelope")
}
blockRoot := bytesutil.ToBytes32(envelope.BeaconBlockRoot)
st, err := vs.StateGen.StateByRoot(ctx, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get state by block root")
}
if st == nil {
return nil, errors.New("nil state for block root")
}
// Copy the state to avoid mutating the original
st = st.Copy()
// TODO: Process the execution payload envelope through state transition.
// This requires implementing ProcessPayloadStateTransition in beacon-chain/core/gloas.
// For now, use the state root from the beacon block state as a placeholder.
// The correct implementation would:
// 1. Call ProcessPayloadStateTransition(ctx, st, envelope) to apply payload effects
// 2. Compute HashTreeRoot of the resulting state
root, err := st.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not compute state root")
}
return root[:], nil
}
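// Once a payload state transition lands, the placeholder above would reduce
// to something like the following (hypothetical package and helper name):
//
//	if err := gloas.ProcessPayloadStateTransition(ctx, st, envelope); err != nil {
//		return nil, errors.Wrap(err, "process payload state transition")
//	}
//	root, err := st.HashTreeRoot(ctx)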
// envelopeBlockWaitTimeout is the maximum time to wait for the associated beacon block
// before giving up on publishing the execution payload envelope.
const envelopeBlockWaitTimeout = 4 * time.Second
// envelopeBlockPollInterval is how often to check for the beacon block while waiting.
const envelopeBlockPollInterval = 100 * time.Millisecond
// PublishExecutionPayloadEnvelope validates and broadcasts a signed execution payload envelope.
// This is called by validators after signing the envelope retrieved from GetExecutionPayloadEnvelope.
//
// The function waits for the associated beacon block to be available before processing,
// as the envelope references a beacon_block_root that must exist either from local
// production or P2P gossip.
//
// gRPC endpoint: POST /eth/v1alpha1/validator/execution_payload_envelope
func (vs *Server) PublishExecutionPayloadEnvelope(
ctx context.Context,
req *ethpb.SignedExecutionPayloadEnvelope,
) (*emptypb.Empty, error) {
if req == nil || req.Message == nil {
return nil, status.Error(codes.InvalidArgument, "signed envelope cannot be nil")
}
if slots.ToEpoch(req.Message.Slot) < params.BeaconConfig().GloasForkEpoch {
return nil, status.Errorf(codes.InvalidArgument,
"execution payload envelopes are not supported before GLOAS fork (slot %d)", req.Message.Slot)
}
beaconBlockRoot := bytesutil.ToBytes32(req.Message.BeaconBlockRoot)
log := log.WithFields(logrus.Fields{
"slot": req.Message.Slot,
"builderIndex": req.Message.BuilderIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", beaconBlockRoot[:8]),
})
log.Info("Publishing signed execution payload envelope")
// Wait for the associated beacon block to be available.
// The block may come from local production or P2P gossip.
if err := vs.waitForBeaconBlock(ctx, beaconBlockRoot); err != nil {
return nil, status.Errorf(codes.FailedPrecondition,
"beacon block %#x not available: %v", beaconBlockRoot[:8], err)
}
// TODO: Validate envelope signature before broadcasting
// if err := vs.validateEnvelopeSignature(ctx, req); err != nil {
// return nil, status.Errorf(codes.InvalidArgument, "invalid envelope signature: %v", err)
// }
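// A verification sketch, mirroring the validator-side signing in propose.go;
// it assumes the builder's public key can be read from the head state, and
// the wiring below is illustrative only:
//
//	domain, err := signing.Domain(fork, slots.ToEpoch(req.Message.Slot),
//		params.BeaconConfig().DomainBeaconBuilder, genesisValidatorsRoot)
//	signingRoot, err := signing.ComputeSigningRoot(req.Message, domain)
//	sig, err := bls.SignatureFromBytes(req.Signature)
//	if err != nil || !sig.Verify(builderPubkey, signingRoot[:]) {
//		return nil, status.Error(codes.InvalidArgument, "invalid envelope signature")
//	}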
// Build data column sidecars from the cached blobs bundle before broadcasting.
// In GLOAS, blob data is delivered alongside the execution payload envelope
// rather than with the beacon block (which only carries the bid).
dataColumnSidecars, err := vs.buildEnvelopeDataColumns(ctx, req.Message, beaconBlockRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to build data column sidecars: %v", err)
}
// Broadcast envelope and data column sidecars concurrently.
eg, eCtx := errgroup.WithContext(ctx)
eg.Go(func() error {
if err := vs.P2P.Broadcast(eCtx, req); err != nil {
return errors.Wrap(err, "broadcast signed execution payload envelope")
}
// TODO: Receive the envelope locally following the broadcastReceiveBlock pattern.
// This requires:
// 1. blocks.WrappedROSignedExecutionPayloadEnvelope wrapper
// 2. BlockReceiver.ReceiveExecutionPayloadEnvelope method
// See epbs branch's receive_execution_payload_envelope.go for reference.
return nil
})
if len(dataColumnSidecars) > 0 {
eg.Go(func() error {
return vs.broadcastAndReceiveDataColumns(eCtx, dataColumnSidecars)
})
}
if err := eg.Wait(); err != nil {
return nil, status.Errorf(codes.Internal, "failed to publish execution payload envelope: %v", err)
}
log.Info("Successfully published execution payload envelope")
return &emptypb.Empty{}, nil
}
// waitForBeaconBlock waits for the beacon block with the given root to be available.
// It first checks if the block already exists, then subscribes to block notifications
// and polls periodically until the block arrives or the timeout is reached.
func (vs *Server) waitForBeaconBlock(ctx context.Context, blockRoot [32]byte) error {
// Fast path: check if block already exists
if vs.BlockReceiver.HasBlock(ctx, blockRoot) {
return nil
}
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot[:8])).
Debug("Waiting for beacon block to arrive")
waitCtx, cancel := context.WithTimeout(ctx, envelopeBlockWaitTimeout)
defer cancel()
blocksChan := make(chan *feed.Event, 1)
blockSub := vs.BlockNotifier.BlockFeed().Subscribe(blocksChan)
defer blockSub.Unsubscribe()
ticker := time.NewTicker(envelopeBlockPollInterval)
defer ticker.Stop()
for {
select {
case <-waitCtx.Done():
return errors.Wrap(waitCtx.Err(), "timeout waiting for beacon block")
case blockEvent := <-blocksChan:
if blockEvent.Type == blockfeed.ReceivedBlock {
data, ok := blockEvent.Data.(*blockfeed.ReceivedBlockData)
if ok && data != nil && data.SignedBlock != nil {
root, err := data.SignedBlock.Block().HashTreeRoot()
if err == nil && root == blockRoot {
return nil
}
}
}
case <-ticker.C:
if vs.BlockReceiver.HasBlock(ctx, blockRoot) {
return nil
}
case <-blockSub.Err():
return errors.New("block subscription closed")
}
}
}
// buildEnvelopeDataColumns retrieves the cached blobs bundle for the envelope's
// slot/builder and builds data column sidecars. Returns nil if no blobs to broadcast.
func (vs *Server) buildEnvelopeDataColumns(
ctx context.Context,
envelope *ethpb.ExecutionPayloadEnvelope,
blockRoot [32]byte,
) ([]consensusblocks.RODataColumn, error) {
if vs.ExecutionPayloadEnvelopeCache == nil {
return nil, nil
}
blobsBundle, found := vs.ExecutionPayloadEnvelopeCache.GetBlobsBundle(envelope.Slot, envelope.BuilderIndex)
if !found || blobsBundle == nil {
return nil, nil
}
blobs := blobsBundle.GetBlobs()
proofs := blobsBundle.GetProofs()
commitments := envelope.BlobKzgCommitments
if len(blobs) == 0 {
return nil, nil
}
// Retrieve the beacon block to build the signed block header for sidecars.
blk, err := vs.BeaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get block for data column sidecars")
}
if blk == nil {
return nil, errors.New("block not found for data column sidecars")
}
roBlock, err := consensusblocks.NewROBlockWithRoot(blk, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not create ROBlock")
}
return buildDataColumnSidecars(blobs, proofs, peerdas.PopulateFromEnvelope(roBlock, commitments))
}

View File

@@ -1,104 +0,0 @@
package validator
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestSetGloasExecutionData(t *testing.T) {
parentRoot := [32]byte{1, 2, 3}
slot := primitives.Slot(100)
proposerIndex := primitives.ValidatorIndex(42)
sBlk, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: slot,
ProposerIndex: proposerIndex,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{},
},
})
require.NoError(t, err)
payload := &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
ExtraData: make([]byte, 0),
}
ed, err := consensusblocks.WrappedExecutionPayloadDeneb(payload)
require.NoError(t, err)
local := &consensusblocks.GetPayloadResponse{
ExecutionData: ed,
Bid: primitives.ZeroWei(),
BlobsBundler: nil,
ExecutionRequests: &enginev1.ExecutionRequests{},
}
envelopeCache := cache.NewExecutionPayloadEnvelopeCache()
vs := &Server{
ExecutionPayloadEnvelopeCache: envelopeCache,
}
err = vs.setGloasExecutionData(t.Context(), sBlk, local)
require.NoError(t, err)
// Verify the signed bid was set on the block.
signedBid, err := sBlk.Block().Body().SignedExecutionPayloadBid()
require.NoError(t, err)
require.NotNil(t, signedBid)
require.NotNil(t, signedBid.Message)
// Per spec (process_execution_payload_bid): for self-builds,
// signature must be G2 point-at-infinity.
require.DeepEqual(t, common.InfiniteSignature[:], signedBid.Signature)
// Verify bid fields.
bid := signedBid.Message
require.Equal(t, slot, bid.Slot)
require.Equal(t, primitives.BuilderIndex(proposerIndex), bid.BuilderIndex)
require.DeepEqual(t, parentRoot[:], bid.ParentBlockRoot)
require.Equal(t, uint64(0), bid.Value)
require.Equal(t, uint64(0), bid.ExecutionPayment)
// Verify the envelope was cached.
envelope, found := envelopeCache.Get(slot, primitives.BuilderIndex(proposerIndex))
require.Equal(t, true, found)
require.NotNil(t, envelope)
require.Equal(t, slot, envelope.Slot)
require.Equal(t, primitives.BuilderIndex(proposerIndex), envelope.BuilderIndex)
}
func TestSetGloasExecutionData_NilPayload(t *testing.T) {
sBlk, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 1,
ParentRoot: make([]byte, 32),
Body: &ethpb.BeaconBlockBodyGloas{},
},
})
require.NoError(t, err)
vs := &Server{
ExecutionPayloadEnvelopeCache: cache.NewExecutionPayloadEnvelopeCache(),
}
err = vs.setGloasExecutionData(t.Context(), sBlk, nil)
require.ErrorContains(t, "local execution payload is nil", err)
err = vs.setGloasExecutionData(t.Context(), sBlk, &consensusblocks.GetPayloadResponse{})
require.ErrorContains(t, "local execution payload is nil", err)
}

View File

@@ -44,46 +44,45 @@ import (
// and committees in which particular validators need to perform their responsibilities,
// and more.
type Server struct {
Ctx context.Context
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
ExecutionPayloadEnvelopeCache *cache.ExecutionPayloadEnvelopeCache // GLOAS: Cache for execution payload envelopes
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
ForkchoiceFetcher blockchain.ForkchoiceFetcher
GenesisFetcher blockchain.GenesisFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
BlockFetcher execution.POWBlockFetcher
DepositFetcher cache.DepositFetcher
ChainStartFetcher execution.ChainStartFetcher
Eth1InfoFetcher execution.ChainInfoFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncChecker sync.Checker
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
P2P p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
SlashingsPool slashings.PoolManager
ExitPool voluntaryexits.PoolManager
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
StateGen stategen.StateManager
ReplayerBuilder stategen.ReplayerBuilder
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
ClockWaiter startup.ClockWaiter
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
Ctx context.Context
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
ForkchoiceFetcher blockchain.ForkchoiceFetcher
GenesisFetcher blockchain.GenesisFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher
BlockFetcher execution.POWBlockFetcher
DepositFetcher cache.DepositFetcher
ChainStartFetcher execution.ChainStartFetcher
Eth1InfoFetcher execution.ChainInfoFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncChecker sync.Checker
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
P2P p2p.Broadcaster
AttestationCache *cache.AttestationCache
AttPool attestations.Pool
SlashingsPool slashings.PoolManager
ExitPool voluntaryexits.PoolManager
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
StateGen stategen.StateManager
ReplayerBuilder stategen.ReplayerBuilder
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
ClockWaiter startup.ClockWaiter
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
}
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.

File diff suppressed because it is too large

View File

@@ -25,7 +25,6 @@ import "proto/prysm/v1alpha1/beacon_block.proto";
import "proto/prysm/v1alpha1/beacon_core_types.proto";
import "proto/prysm/v1alpha1/sync_committee.proto";
import "proto/prysm/v1alpha1/attestation.proto";
import "proto/prysm/v1alpha1/gloas.proto";
option csharp_namespace = "Ethereum.Eth.V1";
option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
@@ -440,39 +439,6 @@ service BeaconNodeValidator {
get : "/eth/v1alpha1/validator/blocks/aggregated_sig_and_aggregation_bits"
};
}
// ==========================================================================
// GLOAS Fork Endpoints
// ==========================================================================
// GetExecutionPayloadEnvelope retrieves a cached execution payload envelope
// for the given slot and builder index. This is called by validators after
// receiving a GLOAS block to get the envelope they need to sign and broadcast.
//
// The envelope is cached by the beacon node during block production and
// contains the full execution payload that corresponds to the bid in the block.
rpc GetExecutionPayloadEnvelope(ExecutionPayloadEnvelopeRequest)
returns (ExecutionPayloadEnvelopeResponse) {
option deprecated = true;
option (google.api.http) = {
get : "/eth/v1alpha1/validator/execution_payload_envelope/{slot}/{builder_index}"
};
}
// PublishExecutionPayloadEnvelope broadcasts a signed execution payload envelope
// to the P2P network. This is called by validators after signing the envelope
// retrieved from GetExecutionPayloadEnvelope.
//
// The beacon node validates the envelope signature and broadcasts it to peers
// via the execution_payload_envelope gossip topic.
rpc PublishExecutionPayloadEnvelope(SignedExecutionPayloadEnvelope)
returns (google.protobuf.Empty) {
option deprecated = true;
option (google.api.http) = {
post : "/eth/v1alpha1/validator/execution_payload_envelope"
body : "*"
};
}
}
// SyncMessageBlockRootResponse for beacon chain validator to retrieve and
@@ -1168,34 +1134,3 @@ message AggregatedSigAndAggregationBitsResponse {
bytes aggregated_sig = 1;
bytes bits = 2;
}
// =============================================================================
// GLOAS Fork Messages
// =============================================================================
// ExecutionPayloadEnvelopeRequest is the request for retrieving a cached
// execution payload envelope from the beacon node.
message ExecutionPayloadEnvelopeRequest {
option deprecated = true;
// The slot for which to retrieve the execution payload envelope.
uint64 slot = 1 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
// The builder index that created the payload envelope.
uint64 builder_index = 2 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.BuilderIndex"
];
}
// ExecutionPayloadEnvelopeResponse is the response containing the cached
// execution payload envelope.
message ExecutionPayloadEnvelopeResponse {
option deprecated = true;
// The execution payload envelope for the requested slot and builder.
ExecutionPayloadEnvelope envelope = 1;
}

View File

@@ -683,46 +683,6 @@ func (mr *MockBeaconNodeValidatorClientMockRecorder) WaitForChainStart(arg0, arg
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForChainStart", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).WaitForChainStart), varargs...)
}
// GetExecutionPayloadEnvelope mocks base method.
func (m *MockBeaconNodeValidatorClient) GetExecutionPayloadEnvelope(arg0 context.Context, arg1 *eth.ExecutionPayloadEnvelopeRequest, arg2 ...grpc.CallOption) (*eth.ExecutionPayloadEnvelopeResponse, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetExecutionPayloadEnvelope", varargs...)
ret0, _ := ret[0].(*eth.ExecutionPayloadEnvelopeResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetExecutionPayloadEnvelope indicates an expected call of GetExecutionPayloadEnvelope.
func (mr *MockBeaconNodeValidatorClientMockRecorder) GetExecutionPayloadEnvelope(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExecutionPayloadEnvelope", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).GetExecutionPayloadEnvelope), varargs...)
}
// PublishExecutionPayloadEnvelope mocks base method.
func (m *MockBeaconNodeValidatorClient) PublishExecutionPayloadEnvelope(arg0 context.Context, arg1 *eth.SignedExecutionPayloadEnvelope, arg2 ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "PublishExecutionPayloadEnvelope", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PublishExecutionPayloadEnvelope indicates an expected call of PublishExecutionPayloadEnvelope.
func (mr *MockBeaconNodeValidatorClientMockRecorder) PublishExecutionPayloadEnvelope(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishExecutionPayloadEnvelope", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).PublishExecutionPayloadEnvelope), varargs...)
}
// MockBeaconNodeValidator_WaitForChainStartClient is a mock of BeaconNodeValidator_WaitForChainStartClient interface.
type MockBeaconNodeValidator_WaitForChainStartClient struct {
ctrl *gomock.Controller

View File

@@ -518,36 +518,6 @@ func (mr *MockBeaconNodeValidatorServerMockRecorder) WaitForChainStart(arg0, arg
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForChainStart", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).WaitForChainStart), arg0, arg1)
}
// GetExecutionPayloadEnvelope mocks base method.
func (m *MockBeaconNodeValidatorServer) GetExecutionPayloadEnvelope(arg0 context.Context, arg1 *eth.ExecutionPayloadEnvelopeRequest) (*eth.ExecutionPayloadEnvelopeResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetExecutionPayloadEnvelope", arg0, arg1)
ret0, _ := ret[0].(*eth.ExecutionPayloadEnvelopeResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetExecutionPayloadEnvelope indicates an expected call of GetExecutionPayloadEnvelope.
func (mr *MockBeaconNodeValidatorServerMockRecorder) GetExecutionPayloadEnvelope(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExecutionPayloadEnvelope", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).GetExecutionPayloadEnvelope), arg0, arg1)
}
// PublishExecutionPayloadEnvelope mocks base method.
func (m *MockBeaconNodeValidatorServer) PublishExecutionPayloadEnvelope(arg0 context.Context, arg1 *eth.SignedExecutionPayloadEnvelope) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PublishExecutionPayloadEnvelope", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PublishExecutionPayloadEnvelope indicates an expected call of PublishExecutionPayloadEnvelope.
func (mr *MockBeaconNodeValidatorServerMockRecorder) PublishExecutionPayloadEnvelope(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishExecutionPayloadEnvelope", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).PublishExecutionPayloadEnvelope), arg0, arg1)
}
// MockBeaconNodeValidator_WaitForActivationServer is a mock of BeaconNodeValidator_WaitForActivationServer interface.
type MockBeaconNodeValidator_WaitForActivationServer struct {
ctrl *gomock.Controller

View File

@@ -518,33 +518,3 @@ func (mr *MockValidatorClientMockRecorder) WaitForChainStart(ctx, in any) *gomoc
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForChainStart", reflect.TypeOf((*MockValidatorClient)(nil).WaitForChainStart), ctx, in)
}
// ExecutionPayloadEnvelope mocks base method.
func (m *MockValidatorClient) ExecutionPayloadEnvelope(ctx context.Context, slot primitives.Slot, builderIndex primitives.BuilderIndex) (*eth.ExecutionPayloadEnvelope, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExecutionPayloadEnvelope", ctx, slot, builderIndex)
ret0, _ := ret[0].(*eth.ExecutionPayloadEnvelope)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExecutionPayloadEnvelope indicates an expected call of ExecutionPayloadEnvelope.
func (mr *MockValidatorClientMockRecorder) ExecutionPayloadEnvelope(ctx, slot, builderIndex any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecutionPayloadEnvelope", reflect.TypeOf((*MockValidatorClient)(nil).ExecutionPayloadEnvelope), ctx, slot, builderIndex)
}
// PublishExecutionPayloadEnvelope mocks base method.
func (m *MockValidatorClient) PublishExecutionPayloadEnvelope(ctx context.Context, in *eth.SignedExecutionPayloadEnvelope) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PublishExecutionPayloadEnvelope", ctx, in)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PublishExecutionPayloadEnvelope indicates an expected call of PublishExecutionPayloadEnvelope.
func (mr *MockValidatorClientMockRecorder) PublishExecutionPayloadEnvelope(ctx, in any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishExecutionPayloadEnvelope", reflect.TypeOf((*MockValidatorClient)(nil).PublishExecutionPayloadEnvelope), ctx, in)
}

View File

@@ -17,7 +17,6 @@ go_library(
"duties.go",
"genesis.go",
"get_beacon_block.go",
"gloas.go",
"index.go",
"log.go",
"metrics.go",

View File

@@ -342,23 +342,3 @@ func (c *beaconApiValidatorClient) Host() string {
func (c *beaconApiValidatorClient) EnsureReady(ctx context.Context) bool {
return fallback.EnsureReady(ctx, c.restProvider, c.nodeClient)
}
// GLOAS Fork Methods
func (c *beaconApiValidatorClient) ExecutionPayloadEnvelope(ctx context.Context, slot primitives.Slot, builderIndex primitives.BuilderIndex) (*ethpb.ExecutionPayloadEnvelope, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.ExecutionPayloadEnvelope")
defer span.End()
return wrapInMetrics[*ethpb.ExecutionPayloadEnvelope]("ExecutionPayloadEnvelope", func() (*ethpb.ExecutionPayloadEnvelope, error) {
return c.getExecutionPayloadEnvelope(ctx, slot, builderIndex)
})
}
func (c *beaconApiValidatorClient) PublishExecutionPayloadEnvelope(ctx context.Context, in *ethpb.SignedExecutionPayloadEnvelope) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.PublishExecutionPayloadEnvelope")
defer span.End()
return wrapInMetrics[*empty.Empty]("PublishExecutionPayloadEnvelope", func() (*empty.Empty, error) {
return c.publishExecutionPayloadEnvelope(ctx, in)
})
}

View File

@@ -1,122 +0,0 @@
package beacon_api
import (
"context"
"fmt"
neturl "net/url"
"github.com/OffchainLabs/prysm/v7/api/apiutil"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
)
// getExecutionPayloadEnvelope retrieves the execution payload envelope for the given
// slot and builder index. This is called by validators after receiving a GLOAS block
// to get the envelope they need to sign and broadcast.
//
// REST endpoint: GET /eth/v1/validator/execution_payload_envelope/{slot}/{builder_index}
func (c *beaconApiValidatorClient) getExecutionPayloadEnvelope(
ctx context.Context,
slot primitives.Slot,
builderIndex primitives.BuilderIndex,
) (*ethpb.ExecutionPayloadEnvelope, error) {
// TODO: Implement execution payload envelope retrieval
//
// Implementation steps:
// 1. Build URL with slot and builder_index path parameters
// 2. Make GET request (support both JSON and SSZ based on Accept header)
// 3. Parse response
// 4. Convert to proto type
// 5. Return envelope
queryUrl := apiutil.BuildURL(
fmt.Sprintf("/eth/v1/validator/execution_payload_envelope/%d/%d", slot, builderIndex),
neturl.Values{},
)
_ = queryUrl
return nil, errors.New("getExecutionPayloadEnvelope not yet implemented")
}
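// The eventual GET wiring would look roughly like the following; this is a
// sketch assuming the client exposes a JSON REST handler and a response
// struct mirroring the envelope (both hypothetical here):
//
//	resp := &getEnvelopeResponseJSON{}
//	if err := c.jsonRestHandler.Get(ctx, queryUrl, resp); err != nil {
//		return nil, errors.Wrap(err, "get execution payload envelope")
//	}
//	return envelopeJSONToProto(resp.Data)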
// publishExecutionPayloadEnvelope broadcasts a signed execution payload envelope
// to the beacon node for P2P gossip.
//
// REST endpoint: POST /eth/v1/beacon/execution_payload_envelope
func (c *beaconApiValidatorClient) publishExecutionPayloadEnvelope(
ctx context.Context,
envelope *ethpb.SignedExecutionPayloadEnvelope,
) (*empty.Empty, error) {
// TODO: Implement envelope publishing
//
// Implementation steps:
// 1. Convert proto envelope to JSON struct
// 2. Serialize to JSON (or SSZ based on Content-Type)
// 3. POST to /eth/v1/beacon/execution_payload_envelope
// 4. Handle response (200 = success, 4xx = validation error)
if envelope == nil || envelope.Message == nil {
return nil, errors.New("signed envelope cannot be nil")
}
return nil, errors.New("publishExecutionPayloadEnvelope not yet implemented")
}
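// The eventual POST wiring would look roughly like the following; a sketch
// assuming signedEnvelopeToJSON below is implemented and the client exposes
// a JSON REST handler (hypothetical here):
//
//	jsonEnvelope, err := signedEnvelopeToJSON(envelope)
//	body, err := json.Marshal(jsonEnvelope)
//	err = c.jsonRestHandler.Post(ctx, "/eth/v1/beacon/execution_payload_envelope",
//		nil, bytes.NewBuffer(body), nil)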
// signedEnvelopeToJSON converts a proto SignedExecutionPayloadEnvelope to its JSON representation.
func signedEnvelopeToJSON(envelope *ethpb.SignedExecutionPayloadEnvelope) (any, error) {
// TODO: Implement conversion from proto to JSON struct
//
// Convert each field:
// - message.payload: Marshal ExecutionPayloadDeneb to JSON
// - message.execution_requests: Marshal to JSON
// - message.builder_index: Format as decimal string
// - message.beacon_block_root: Hex encode with 0x prefix
// - message.slot: Format as decimal string
// - message.blob_kzg_commitments: Hex encode each with 0x prefix
// - message.state_root: Hex encode with 0x prefix
// - signature: Hex encode with 0x prefix
return nil, errors.New("signedEnvelopeToJSON not yet implemented")
}
// envelopeJSONToProto converts a JSON execution payload envelope to proto type.
func envelopeJSONToProto(jsonEnvelope any) (*ethpb.ExecutionPayloadEnvelope, error) {
// TODO: Implement conversion from JSON to proto
//
// Parse each field:
// - payload: Unmarshal ExecutionPayloadDeneb from JSON
// - execution_requests: Unmarshal from JSON
// - builder_index: Parse uint64 from decimal string
// - beacon_block_root: Hex decode (strip 0x prefix)
// - slot: Parse uint64 from decimal string
// - blob_kzg_commitments: Hex decode each (strip 0x prefix)
// - state_root: Hex decode (strip 0x prefix)
return nil, errors.New("envelopeJSONToProto not yet implemented")
}
// processGloasBlock handles GLOAS block responses from the beacon node.
// This is called from processBlockJSONResponse when the version is "gloas".
func processGloasBlock(jsonBlock any) (*ethpb.GenericBeaconBlock, error) {
// TODO: Implement GLOAS block processing
//
// Convert the JSON block to proto BeaconBlockGloas:
// 1. Parse BeaconBlockGloas fields
// 2. Parse BeaconBlockBodyGloas with signed_execution_payload_bid
// 3. Parse payload_attestations
// 4. Return GenericBeaconBlock with Gloas variant
return nil, errors.New("processGloasBlock not yet implemented")
}
// processBlockSSZResponseGloas handles SSZ-encoded GLOAS block responses.
func processBlockSSZResponseGloas(data []byte) (*ethpb.GenericBeaconBlock, error) {
// TODO: Implement SSZ deserialization for GLOAS blocks
//
// Note: GLOAS blocks don't have a "blinded" variant in the same way
// as previous forks because the execution payload is always separate.
return nil, errors.New("processBlockSSZResponseGloas not yet implemented")
}

View File

@@ -391,24 +391,3 @@ func (c *grpcValidatorClient) EnsureReady(ctx context.Context) bool {
provider := c.grpcClientManager.conn.GetGrpcConnectionProvider()
return fallback.EnsureReady(ctx, provider, c.nodeClient)
}
// GLOAS Fork Methods
func (c *grpcValidatorClient) ExecutionPayloadEnvelope(ctx context.Context, slot primitives.Slot, builderIndex primitives.BuilderIndex) (*ethpb.ExecutionPayloadEnvelope, error) {
req := &ethpb.ExecutionPayloadEnvelopeRequest{
Slot: slot,
BuilderIndex: builderIndex,
}
resp, err := c.getClient().GetExecutionPayloadEnvelope(ctx, req)
if err != nil {
return nil, errors.Wrap(
client.ErrConnectionIssue,
errors.Wrap(err, "ExecutionPayloadEnvelope").Error(),
)
}
return resp.Envelope, nil
}
func (c *grpcValidatorClient) PublishExecutionPayloadEnvelope(ctx context.Context, in *ethpb.SignedExecutionPayloadEnvelope) (*empty.Empty, error) {
return c.getClient().PublishExecutionPayloadEnvelope(ctx, in)
}

View File

@@ -153,6 +153,4 @@ type ValidatorClient interface {
AggregatedSyncSelections(ctx context.Context, selections []SyncCommitteeSelection) ([]SyncCommitteeSelection, error)
Host() string
EnsureReady(ctx context.Context) bool
ExecutionPayloadEnvelope(ctx context.Context, slot primitives.Slot, builderIndex primitives.BuilderIndex) (*ethpb.ExecutionPayloadEnvelope, error)
PublishExecutionPayloadEnvelope(ctx context.Context, in *ethpb.SignedExecutionPayloadEnvelope) (*empty.Empty, error)
}

View File

@@ -128,8 +128,7 @@ func (v *validator) ProposeBlock(ctx context.Context, slot primitives.Slot, pubK
var genericSignedBlock *ethpb.GenericSignedBeaconBlock
// Special handling for Deneb blocks and later versions because of blob sidecars.
// GLOAS blocks are handled differently - no blobs in block, execution payload is separate.
if blk.Version() >= version.Deneb && blk.Version() < version.Gloas && !blk.IsBlinded() {
if blk.Version() >= version.Deneb && !blk.IsBlinded() {
pb, err := blk.Proto()
if err != nil {
log.WithError(err).Error("Failed to get deneb block")
@@ -177,14 +176,6 @@ func (v *validator) ProposeBlock(ctx context.Context, slot primitives.Slot, pubK
return
}
// GLOAS: After proposing the beacon block, handle the execution payload envelope
if blk.Version() >= version.Gloas {
if err := v.handleGloasExecutionPayloadEnvelope(ctx, slot, pubKey, b); err != nil {
log.WithError(err).Error("Failed to handle GLOAS execution payload envelope")
// Don't return - the block was proposed successfully, envelope handling is secondary
}
}
span.SetAttributes(
trace.StringAttribute("blockRoot", fmt.Sprintf("%#x", blkResp.BlockRoot)),
trace.Int64Attribute("numDeposits", int64(len(blk.Block().Body().Deposits()))),
@@ -592,98 +583,3 @@ func blockLogFields(pubKey [fieldparams.BLSPubkeyLength]byte, blk interfaces.Rea
}
return fields
}
// handleGloasExecutionPayloadEnvelope retrieves, signs, and publishes the execution payload envelope
// for GLOAS blocks. This is called after the beacon block has been proposed.
func (v *validator) handleGloasExecutionPayloadEnvelope(
ctx context.Context,
slot primitives.Slot,
pubKey [fieldparams.BLSPubkeyLength]byte,
b *ethpb.GenericBeaconBlock,
) error {
ctx, span := trace.StartSpan(ctx, "validator.handleGloasExecutionPayloadEnvelope")
defer span.End()
log := log.WithFields(logrus.Fields{
"slot": slot,
"pubkey": fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:])),
})
// Extract builder_index from the GLOAS block's signed execution payload bid
gloasBlock := b.GetGloas()
if gloasBlock == nil {
return errors.New("expected GLOAS block but got nil")
}
if gloasBlock.Body == nil || gloasBlock.Body.SignedExecutionPayloadBid == nil || gloasBlock.Body.SignedExecutionPayloadBid.Message == nil {
return errors.New("GLOAS block missing signed execution payload bid")
}
builderIndex := gloasBlock.Body.SignedExecutionPayloadBid.Message.BuilderIndex
log = log.WithField("builderIndex", builderIndex)
log.Debug("Retrieving execution payload envelope")
// 1. Retrieve the execution payload envelope from the beacon node
envelope, err := v.validatorClient.ExecutionPayloadEnvelope(ctx, slot, builderIndex)
if err != nil {
return errors.Wrap(err, "failed to get execution payload envelope")
}
// 2. Sign the envelope
signedEnvelope, err := v.signExecutionPayloadEnvelope(ctx, pubKey, slot, envelope)
if err != nil {
return errors.Wrap(err, "failed to sign execution payload envelope")
}
// 3. Publish the signed envelope
log.Debug("Publishing signed execution payload envelope")
if _, err := v.validatorClient.PublishExecutionPayloadEnvelope(ctx, signedEnvelope); err != nil {
return errors.Wrap(err, "failed to publish execution payload envelope")
}
log.Info("Successfully published execution payload envelope")
return nil
}
// signExecutionPayloadEnvelope signs the execution payload envelope using the validator's key.
func (v *validator) signExecutionPayloadEnvelope(
ctx context.Context,
pubKey [fieldparams.BLSPubkeyLength]byte,
slot primitives.Slot,
envelope *ethpb.ExecutionPayloadEnvelope,
) (*ethpb.SignedExecutionPayloadEnvelope, error) {
ctx, span := trace.StartSpan(ctx, "validator.signExecutionPayloadEnvelope")
defer span.End()
epoch := slots.ToEpoch(slot)
// Use DomainBeaconBuilder for execution payload envelope signing.
// This domain is used for builder-related operations including envelope signing.
domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainBeaconBuilder[:])
if err != nil {
return nil, errors.Wrap(err, "could not get domain data")
}
if domain == nil {
return nil, errors.New("nil domain data")
}
signingRoot, err := signing.ComputeSigningRoot(envelope, domain.SignatureDomain)
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
sig, err := v.km.Sign(ctx, &validatorpb.SignRequest{
PublicKey: pubKey[:],
SigningRoot: signingRoot[:],
SignatureDomain: domain.SignatureDomain,
Object: &validatorpb.SignRequest_Slot{Slot: slot},
SigningSlot: slot,
})
if err != nil {
return nil, errors.Wrap(err, "could not sign execution payload envelope")
}
return &ethpb.SignedExecutionPayloadEnvelope{
Message: envelope,
Signature: sig.Marshal(),
}, nil
}