diff --git a/beacon-chain/blockchain/BUILD.bazel b/beacon-chain/blockchain/BUILD.bazel
index 452bf14f9b..efefc22ff9 100644
--- a/beacon-chain/blockchain/BUILD.bazel
+++ b/beacon-chain/blockchain/BUILD.bazel
@@ -27,6 +27,7 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
+ "receive_execution_payload_envelope.go",
"receive_payload_attestation_message.go",
"service.go",
"setup_forkchoice.go",
diff --git a/beacon-chain/blockchain/execution_engine.go b/beacon-chain/blockchain/execution_engine.go
index beff80ccca..7adb316faa 100644
--- a/beacon-chain/blockchain/execution_engine.go
+++ b/beacon-chain/blockchain/execution_engine.go
@@ -101,11 +101,16 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
- invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
+ // At this point the `headRoot` is guaranteed to be in forkchoice together with its payload.
+ invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
+ // TODO (Gloas): do not include the head root in this call.
+ if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
+ invalidRoots = append([][32]byte{headRoot}, invalidRoots...)
+ }
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
@@ -290,10 +295,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
-// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
-func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
+// pruneInvalidBlock handles the case where the execution layer has reported an invalid block.
+func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
- invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
+ invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
if err != nil {
return err
}
diff --git a/beacon-chain/blockchain/execution_engine_test.go b/beacon-chain/blockchain/execution_engine_test.go
index e7eda0532c..c500fb1d98 100644
--- a/beacon-chain/blockchain/execution_engine_test.go
+++ b/beacon-chain/blockchain/execution_engine_test.go
@@ -465,9 +465,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.Equal(t, brd, headRoot)
- // Ensure F and G where removed but their parent E wasn't
- require.Equal(t, false, fcs.HasNode(brf))
- require.Equal(t, false, fcs.HasNode(brg))
+ // Ensure F and G's full nodes were removed but their empty (consensus) nodes remain, as does E
+ require.Equal(t, true, fcs.HasNode(brf))
+ require.Equal(t, true, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
@@ -703,14 +703,13 @@ func Test_reportInvalidBlock(t *testing.T) {
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
- err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
+ err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'c'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
- require.Equal(t, 3, len(invalidRoots))
+ require.Equal(t, 2, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
- require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
@@ -785,7 +784,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
}
func Test_GetPayloadAttributeV3(t *testing.T) {
- var testCases = []struct {
+ testCases := []struct {
name string
st bstate.BeaconState
}{
diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go
index dd840548dd..b4880477bc 100644
--- a/beacon-chain/blockchain/process_block.go
+++ b/beacon-chain/blockchain/process_block.go
@@ -232,7 +232,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
- return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
+ // At this point the root is not yet in forkchoice.
+ return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -992,9 +993,9 @@ func (s *Service) waitForSync() error {
}
}
-func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
+func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
- return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
+ return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
}
return err
}
diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go
index 7ef16025f9..85dc863b63 100644
--- a/beacon-chain/blockchain/process_block_test.go
+++ b/beacon-chain/blockchain/process_block_test.go
@@ -2006,6 +2006,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
+ time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import block 19 to find out that the whole chain 13--18 was in fact
// invalid
diff --git a/beacon-chain/blockchain/receive_block.go b/beacon-chain/blockchain/receive_block.go
index 0c016afe73..1ff67bd4b4 100644
--- a/beacon-chain/blockchain/receive_block.go
+++ b/beacon-chain/blockchain/receive_block.go
@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
- err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
+ err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.BlockHash()))
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
diff --git a/beacon-chain/blockchain/receive_execution_payload_envelope.go b/beacon-chain/blockchain/receive_execution_payload_envelope.go
new file mode 100644
index 0000000000..75c2148e1a
--- /dev/null
+++ b/beacon-chain/blockchain/receive_execution_payload_envelope.go
@@ -0,0 +1,19 @@
+package blockchain
+
+import (
+ "context"
+
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+)
+
+// ExecutionPayloadEnvelopeReceiver defines the chain service methods for receiving
+// validated execution payload envelopes.
+type ExecutionPayloadEnvelopeReceiver interface {
+ ReceiveExecutionPayloadEnvelope(context.Context, interfaces.ROSignedExecutionPayloadEnvelope) error
+}
+
+// ReceiveExecutionPayloadEnvelope accepts a signed execution payload envelope.
+func (s *Service) ReceiveExecutionPayloadEnvelope(_ context.Context, _ interfaces.ROSignedExecutionPayloadEnvelope) error {
+ // TODO: wire into execution payload envelope processing pipeline.
+ return nil
+}
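
For illustration, a minimal sketch of how a consumer could depend on the new receiver interface; the example package, the envelopeForwarder type, and its forward method are hypothetical and not part of this change. The compile-time assertion restates that *Service satisfies the interface added above.

    package example

    import (
        "context"

        "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
        "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    )

    // Compile-time check that the blockchain service satisfies the new receiver interface.
    var _ blockchain.ExecutionPayloadEnvelopeReceiver = (*blockchain.Service)(nil)

    // envelopeForwarder depends only on the narrow receiver interface rather than the full service.
    type envelopeForwarder struct {
        chain blockchain.ExecutionPayloadEnvelopeReceiver
    }

    // forward hands a validated signed envelope to the chain service.
    func (f *envelopeForwarder) forward(ctx context.Context, env interfaces.ROSignedExecutionPayloadEnvelope) error {
        return f.chain.ReceiveExecutionPayloadEnvelope(ctx, env)
    }
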
diff --git a/beacon-chain/blockchain/testing/mock.go b/beacon-chain/blockchain/testing/mock.go
index 27c2c34eb1..253a4ca48e 100644
--- a/beacon-chain/blockchain/testing/mock.go
+++ b/beacon-chain/blockchain/testing/mock.go
@@ -762,6 +762,11 @@ func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *et
return nil
}
+// ReceiveExecutionPayloadEnvelope implements the same method in the chain service.
+func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ interfaces.ROSignedExecutionPayloadEnvelope) error {
+ return nil
+}
+
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
diff --git a/beacon-chain/core/altair/BUILD.bazel b/beacon-chain/core/altair/BUILD.bazel
index 86cfe9f05f..9782a0373c 100644
--- a/beacon-chain/core/altair/BUILD.bazel
+++ b/beacon-chain/core/altair/BUILD.bazel
@@ -20,6 +20,7 @@ go_library(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
+ "//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
diff --git a/beacon-chain/core/altair/attestation.go b/beacon-chain/core/altair/attestation.go
index ec2ac3d537..2c9ed906f7 100644
--- a/beacon-chain/core/altair/attestation.go
+++ b/beacon-chain/core/altair/attestation.go
@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -75,7 +76,11 @@ func ProcessAttestationNoVerifySignature(
return nil, err
}
- return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
+ if err := beaconState.UpdatePendingPaymentWeight(att, indices, participatedFlags); err != nil {
+ return nil, errors.Wrap(err, "failed to update pending payment weight")
+ }
+
+ return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance, att)
}
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
@@ -105,7 +110,9 @@ func SetParticipationAndRewardProposer(
beaconState state.BeaconState,
targetEpoch primitives.Epoch,
indices []uint64,
- participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
+ participatedFlags map[uint8]bool,
+ totalBalance uint64,
+ att ethpb.Att) (state.BeaconState, error) {
var proposerRewardNumerator uint64
currentEpoch := time.CurrentEpoch(beaconState)
var stateErr error
@@ -299,6 +306,19 @@ func AttestationParticipationFlagIndices(beaconState state.ReadOnlyBeaconState,
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt
+
+ var beaconBlockRoot [32]byte
+ copy(beaconBlockRoot[:], data.BeaconBlockRoot)
+ matchingPayload, err := gloas.MatchingPayload(
+ beaconState,
+ beaconBlockRoot,
+ data.Slot,
+ uint64(data.CommitteeIndex),
+ )
+ if err != nil {
+ return nil, err
+ }
+ matchedSrcTgtHead = matchedSrcTgtHead && matchingPayload
if matchedSrcTgtHead && delay == cfg.MinAttestationInclusionDelay {
participatedFlags[headFlagIndex] = true
}
diff --git a/beacon-chain/core/altair/attestation_test.go b/beacon-chain/core/altair/attestation_test.go
index 8ceb34810f..68e4babc7f 100644
--- a/beacon-chain/core/altair/attestation_test.go
+++ b/beacon-chain/core/altair/attestation_test.go
@@ -1,7 +1,9 @@
package altair_test
import (
+ "bytes"
"fmt"
+ "reflect"
"testing"
"github.com/OffchainLabs/go-bitfield"
@@ -556,7 +558,7 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
- st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
+ st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b, &ethpb.Attestation{})
require.NoError(t, err)
i, err := helpers.BeaconProposerIndex(t.Context(), st)
@@ -775,11 +777,67 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
headFlagIndex: true,
},
},
+ {
+ name: "gloas same-slot committee index non-zero errors",
+ inputState: func() state.BeaconState {
+ stateSlot := primitives.Slot(5)
+ slot := primitives.Slot(3)
+ targetRoot := bytes.Repeat([]byte{0xAA}, 32)
+ headRoot := bytes.Repeat([]byte{0xBB}, 32)
+ prevRoot := bytes.Repeat([]byte{0xCC}, 32)
+ return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, prevRoot, 0, 0)
+ }(),
+ inputData: &ethpb.AttestationData{
+ Slot: 3,
+ CommitteeIndex: 1, // invalid for same-slot
+ BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
+ Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
+ Target: &ethpb.Checkpoint{
+ Epoch: 0,
+ Root: bytes.Repeat([]byte{0xAA}, 32),
+ },
+ },
+ inputDelay: 1,
+ participationIndices: nil,
+ },
+ {
+ name: "gloas payload availability matches committee index",
+ inputState: func() state.BeaconState {
+ stateSlot := primitives.Slot(5)
+ slot := primitives.Slot(3)
+ targetRoot := bytes.Repeat([]byte{0xAA}, 32)
+ headRoot := bytes.Repeat([]byte{0xBB}, 32)
+ // Reuse the head root as the previous root so the same-slot check is false and the payload availability path is exercised.
+ return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, headRoot, 1, slot)
+ }(),
+ inputData: &ethpb.AttestationData{
+ Slot: 3,
+ CommitteeIndex: 1,
+ BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
+ Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
+ Target: &ethpb.Checkpoint{
+ Epoch: 0,
+ Root: bytes.Repeat([]byte{0xAA}, 32),
+ },
+ },
+ inputDelay: 1,
+ participationIndices: map[uint8]bool{
+ sourceFlagIndex: true,
+ targetFlagIndex: true,
+ headFlagIndex: true,
+ },
+ },
}
for _, test := range tests {
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
+ if test.participationIndices == nil {
+ require.ErrorContains(t, "committee index", err)
+ continue
+ }
require.NoError(t, err)
- require.DeepEqual(t, test.participationIndices, flagIndices)
+ if !reflect.DeepEqual(test.participationIndices, flagIndices) {
+ t.Fatalf("unexpected participation indices: got %v want %v", flagIndices, test.participationIndices)
+ }
}
}
@@ -858,3 +916,61 @@ func TestMatchingStatus(t *testing.T) {
require.Equal(t, test.matchedHead, head)
}
}
+
+func buildGloasStateForFlags(t *testing.T, stateSlot, slot primitives.Slot, targetRoot, headRoot, prevRoot []byte, availabilityBit uint8, availabilitySlot primitives.Slot) state.BeaconState {
+ t.Helper()
+
+ cfg := params.BeaconConfig()
+ blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ blockRoots[0] = targetRoot
+ blockRoots[slot%cfg.SlotsPerHistoricalRoot] = headRoot
+ blockRoots[(slot-1)%cfg.SlotsPerHistoricalRoot] = prevRoot
+
+ stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ for i := range stateRoots {
+ stateRoots[i] = make([]byte, fieldparams.RootLength)
+ }
+ randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
+ for i := range randaoMixes {
+ randaoMixes[i] = make([]byte, fieldparams.RootLength)
+ }
+
+ execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
+ idx := availabilitySlot % cfg.SlotsPerHistoricalRoot
+ byteIndex := idx / 8
+ bitIndex := idx % 8
+ if availabilityBit == 1 {
+ execPayloadAvailability[byteIndex] |= 1 << bitIndex
+ }
+
+ checkpointRoot := bytes.Repeat([]byte{0xDD}, fieldparams.RootLength)
+ justified := &ethpb.Checkpoint{Root: checkpointRoot}
+
+ stProto := &ethpb.BeaconStateGloas{
+ Slot: stateSlot,
+ GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
+ BlockRoots: blockRoots,
+ StateRoots: stateRoots,
+ RandaoMixes: randaoMixes,
+ ExecutionPayloadAvailability: execPayloadAvailability,
+ CurrentJustifiedCheckpoint: justified,
+ PreviousJustifiedCheckpoint: justified,
+ Validators: []*ethpb.Validator{
+ {
+ EffectiveBalance: cfg.MinActivationBalance,
+ WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
+ },
+ },
+ Balances: []uint64{cfg.MinActivationBalance},
+ BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
+ Fork: &ethpb.Fork{
+ CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
+ PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
+ Epoch: 0,
+ },
+ }
+
+ beaconState, err := state_native.InitializeFromProtoGloas(stProto)
+ require.NoError(t, err)
+ return beaconState
+}
diff --git a/beacon-chain/core/blocks/attestation.go b/beacon-chain/core/blocks/attestation.go
index 858464603d..a16804479c 100644
--- a/beacon-chain/core/blocks/attestation.go
+++ b/beacon-chain/core/blocks/attestation.go
@@ -111,10 +111,21 @@ func VerifyAttestationNoVerifySignature(
var indexedAtt ethpb.IndexedAtt
if att.Version() >= version.Electra {
- if att.GetData().CommitteeIndex != 0 {
- return errors.New("committee index must be 0 post-Electra")
+ ci := att.GetData().CommitteeIndex
+ // Spec v1.7.0-alpha pseudocode:
+ //
+ // # [Modified in Gloas:EIP7732]
+ // assert data.index < 2
+ //
+ if beaconState.Version() >= version.Gloas {
+ if ci >= 2 {
+ return fmt.Errorf("incorrect committee index %d", ci)
+ }
+ } else {
+ if ci != 0 {
+ return errors.New("committee index must be 0 between Electra and Gloas forks")
+ }
}
-
aggBits := att.GetAggregationBits()
committeeIndices := att.CommitteeBitsVal().BitIndices()
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
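
For illustration, the fork-gated committee index rule above distilled into a standalone predicate; the example package and the committeeIndexAllowed helper are hypothetical, not Prysm API.

    package example

    import "github.com/OffchainLabs/prysm/v7/runtime/version"

    // committeeIndexAllowed mirrors the check above: post-Electra attestations must carry
    // committee index 0, while from Gloas onward any index below 2 is accepted.
    // Pre-Electra attestations are validated elsewhere.
    func committeeIndexAllowed(stateVersion int, index uint64) bool {
        switch {
        case stateVersion >= version.Gloas:
            return index < 2
        case stateVersion >= version.Electra:
            return index == 0
        default:
            return true
        }
    }
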
diff --git a/beacon-chain/core/blocks/attestation_test.go b/beacon-chain/core/blocks/attestation_test.go
index 24a068ef91..78f1866253 100644
--- a/beacon-chain/core/blocks/attestation_test.go
+++ b/beacon-chain/core/blocks/attestation_test.go
@@ -1,6 +1,7 @@
package blocks_test
import (
+ "bytes"
"context"
"testing"
@@ -262,7 +263,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
CommitteeBits: bitfield.NewBitvector64(),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
- assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
+ assert.ErrorContains(t, "committee index must be 0", err)
})
t.Run("index of committee too big", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
@@ -314,6 +315,75 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
})
}
+func TestVerifyAttestationNoVerifySignature_GloasCommitteeIndexLimit(t *testing.T) {
+ cfg := params.BeaconConfig()
+ stateSlot := cfg.MinAttestationInclusionDelay + 1
+
+ blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ for i := range blockRoots {
+ blockRoots[i] = make([]byte, fieldparams.RootLength)
+ }
+ stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ for i := range stateRoots {
+ stateRoots[i] = make([]byte, fieldparams.RootLength)
+ }
+ randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
+ for i := range randaoMixes {
+ randaoMixes[i] = make([]byte, fieldparams.RootLength)
+ }
+
+ checkpointRoot := bytes.Repeat([]byte{0xAA}, fieldparams.RootLength)
+ justified := &ethpb.Checkpoint{Epoch: 0, Root: checkpointRoot}
+
+ gloasStateProto := &ethpb.BeaconStateGloas{
+ Slot: stateSlot,
+ GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
+ BlockRoots: blockRoots,
+ StateRoots: stateRoots,
+ RandaoMixes: randaoMixes,
+ ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
+ CurrentJustifiedCheckpoint: justified,
+ PreviousJustifiedCheckpoint: justified,
+ Validators: []*ethpb.Validator{
+ {
+ EffectiveBalance: cfg.MinActivationBalance,
+ WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
+ },
+ },
+ Balances: []uint64{cfg.MinActivationBalance},
+ BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
+ Fork: &ethpb.Fork{
+ CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
+ PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
+ Epoch: 0,
+ },
+ }
+
+ beaconState, err := state_native.InitializeFromProtoGloas(gloasStateProto)
+ require.NoError(t, err)
+
+ committeeBits := bitfield.NewBitvector64()
+ committeeBits.SetBitAt(0, true)
+ aggBits := bitfield.NewBitlist(1)
+ aggBits.SetBitAt(0, true)
+
+ att := &ethpb.AttestationElectra{
+ Data: &ethpb.AttestationData{
+ Slot: 0,
+ CommitteeIndex: 2, // invalid for Gloas (must be <2)
+ BeaconBlockRoot: blockRoots[0],
+ Source: justified,
+ Target: justified,
+ },
+ AggregationBits: aggBits,
+ CommitteeBits: committeeBits,
+ Signature: bytes.Repeat([]byte{0x00}, fieldparams.BLSSignatureLength),
+ }
+
+ err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
+ assert.ErrorContains(t, "incorrect committee index 2", err)
+}
+
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
@@ -583,6 +653,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
}
func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing.T) {
+ helpers.ClearCache()
ctx := t.Context()
numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))
validators := make([]*ethpb.Validator, numOfValidators)
diff --git a/beacon-chain/core/gloas/BUILD.bazel b/beacon-chain/core/gloas/BUILD.bazel
index d85a4f2c09..0545ad44dc 100644
--- a/beacon-chain/core/gloas/BUILD.bazel
+++ b/beacon-chain/core/gloas/BUILD.bazel
@@ -3,7 +3,11 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
+ "attestation.go",
"bid.go",
+ "deposit_request.go",
+ "log.go",
+ "payload.go",
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
@@ -12,6 +16,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
+ "//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -25,17 +30,23 @@ go_library(
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
+ "//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
+ "//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
+ "@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
+ "attestation_test.go",
"bid_test.go",
+ "deposit_request_test.go",
"payload_attestation_test.go",
+ "payload_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
],
@@ -45,6 +56,7 @@ go_test(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
+ "//beacon-chain/state/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
diff --git a/beacon-chain/core/gloas/attestation.go b/beacon-chain/core/gloas/attestation.go
new file mode 100644
index 0000000000..ba83e105d8
--- /dev/null
+++ b/beacon-chain/core/gloas/attestation.go
@@ -0,0 +1,52 @@
+package gloas
+
+import (
+ "fmt"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/runtime/version"
+ "github.com/pkg/errors"
+)
+
+// MatchingPayload returns true if the attestation's committee index matches the expected payload index.
+//
+// For pre-Gloas forks, this always returns true.
+//
+// Spec v1.7.0-alpha (pseudocode):
+//
+// # [New in Gloas:EIP7732]
+// if is_attestation_same_slot(state, data):
+// assert data.index == 0
+// payload_matches = True
+// else:
+// slot_index = data.slot % SLOTS_PER_HISTORICAL_ROOT
+// payload_index = state.execution_payload_availability[slot_index]
+// payload_matches = data.index == payload_index
+func MatchingPayload(
+ beaconState state.ReadOnlyBeaconState,
+ beaconBlockRoot [32]byte,
+ slot primitives.Slot,
+ committeeIndex uint64,
+) (bool, error) {
+ if beaconState.Version() < version.Gloas {
+ return true, nil
+ }
+
+ sameSlot, err := beaconState.IsAttestationSameSlot(beaconBlockRoot, slot)
+ if err != nil {
+ return false, errors.Wrap(err, "failed to get same slot attestation status")
+ }
+ if sameSlot {
+ if committeeIndex != 0 {
+ return false, fmt.Errorf("committee index %d for same slot attestation must be 0", committeeIndex)
+ }
+ return true, nil
+ }
+
+ executionPayloadAvail, err := beaconState.ExecutionPayloadAvailability(slot)
+ if err != nil {
+ return false, errors.Wrap(err, "failed to get execution payload availability status")
+ }
+ return executionPayloadAvail == committeeIndex, nil
+}
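
For illustration, a usage sketch matching the call made from the altair attestation processing earlier in this diff; the example package and the headPayloadMatches helper are hypothetical.

    package example

    import (
        "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
        "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
        ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    )

    // headPayloadMatches reports whether the Gloas payload-matching rule allows the head
    // participation flag for the given attestation data. Pre-Gloas states always return true.
    func headPayloadMatches(st state.ReadOnlyBeaconState, data *ethpb.AttestationData) (bool, error) {
        var root [32]byte
        copy(root[:], data.BeaconBlockRoot)
        return gloas.MatchingPayload(st, root, data.Slot, uint64(data.CommitteeIndex))
    }
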
diff --git a/beacon-chain/core/gloas/attestation_test.go b/beacon-chain/core/gloas/attestation_test.go
new file mode 100644
index 0000000000..c3d52480d4
--- /dev/null
+++ b/beacon-chain/core/gloas/attestation_test.go
@@ -0,0 +1,110 @@
+package gloas
+
+import (
+ "bytes"
+ "testing"
+
+ state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/runtime/version"
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+)
+
+func buildStateWithBlockRoots(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
+ t.Helper()
+
+ cfg := params.BeaconConfig()
+ blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ for slot, root := range roots {
+ blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
+ }
+
+ stProto := &ethpb.BeaconStateGloas{
+ Slot: stateSlot,
+ BlockRoots: blockRoots,
+ }
+
+ state, err := state_native.InitializeFromProtoGloas(stProto)
+ require.NoError(t, err)
+ return state.(*state_native.BeaconState)
+}
+
+func TestMatchingPayload(t *testing.T) {
+ t.Run("pre-gloas always true", func(t *testing.T) {
+ stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
+ require.NoError(t, err)
+
+ ok, err := MatchingPayload(stIface, [32]byte{}, 0, 123)
+ require.NoError(t, err)
+ require.Equal(t, true, ok)
+ })
+
+ t.Run("same slot requires committee index 0", func(t *testing.T) {
+ root := bytes.Repeat([]byte{0xAA}, 32)
+ state := buildStateWithBlockRoots(t, 6, map[primitives.Slot][]byte{
+ 4: root,
+ 3: bytes.Repeat([]byte{0xBB}, 32),
+ })
+
+ var rootArr [32]byte
+ copy(rootArr[:], root)
+
+ ok, err := MatchingPayload(state, rootArr, 4, 1)
+ require.ErrorContains(t, "committee index", err)
+ require.Equal(t, false, ok)
+ })
+
+ t.Run("same slot matches when committee index is 0", func(t *testing.T) {
+ root := bytes.Repeat([]byte{0xAA}, 32)
+ state := buildStateWithBlockRoots(t, 6, map[primitives.Slot][]byte{
+ 4: root,
+ 3: bytes.Repeat([]byte{0xBB}, 32),
+ })
+
+ var rootArr [32]byte
+ copy(rootArr[:], root)
+
+ ok, err := MatchingPayload(state, rootArr, 4, 0)
+ require.NoError(t, err)
+ require.Equal(t, true, ok)
+ })
+
+ t.Run("non same slot checks payload availability", func(t *testing.T) {
+ cfg := params.BeaconConfig()
+ root := bytes.Repeat([]byte{0xAA}, 32)
+ blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ blockRoots[4%cfg.SlotsPerHistoricalRoot] = bytes.Repeat([]byte{0xCC}, 32)
+ blockRoots[3%cfg.SlotsPerHistoricalRoot] = bytes.Repeat([]byte{0xBB}, 32)
+
+ availability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
+ slotIndex := uint64(4)
+ availability[slotIndex/8] = byte(1 << (slotIndex % 8))
+
+ stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+ Slot: 6,
+ BlockRoots: blockRoots,
+ ExecutionPayloadAvailability: availability,
+ Fork: &ethpb.Fork{
+ CurrentVersion: bytes.Repeat([]byte{0x66}, 4),
+ PreviousVersion: bytes.Repeat([]byte{0x66}, 4),
+ Epoch: 0,
+ },
+ })
+ require.NoError(t, err)
+ state := stIface.(*state_native.BeaconState)
+ require.Equal(t, version.Gloas, state.Version())
+
+ var rootArr [32]byte
+ copy(rootArr[:], root)
+
+ ok, err := MatchingPayload(state, rootArr, 4, 1)
+ require.NoError(t, err)
+ require.Equal(t, true, ok)
+
+ ok, err = MatchingPayload(state, rootArr, 4, 0)
+ require.NoError(t, err)
+ require.Equal(t, false, ok)
+ })
+}
diff --git a/beacon-chain/core/gloas/deposit_request.go b/beacon-chain/core/gloas/deposit_request.go
new file mode 100644
index 0000000000..14407eda5e
--- /dev/null
+++ b/beacon-chain/core/gloas/deposit_request.go
@@ -0,0 +1,180 @@
+package gloas
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/runtime/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+func processDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) error {
+ if len(requests) == 0 {
+ return nil
+ }
+
+ for _, receipt := range requests {
+ if err := processDepositRequest(beaconState, receipt); err != nil {
+ return errors.Wrap(err, "could not apply deposit request")
+ }
+ }
+ return nil
+}
+
+// processDepositRequest processes a single deposit request.
+//
+// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
+// # [New in Gloas:EIP7732]
+// builder_pubkeys = [b.pubkey for b in state.builders]
+// validator_pubkeys = [v.pubkey for v in state.validators]
+//
+// # [New in Gloas:EIP7732]
+// # Regardless of the withdrawal credentials prefix, if a builder/validator
+// # already exists with this pubkey, apply the deposit to their balance
+// is_builder = deposit_request.pubkey in builder_pubkeys
+// is_validator = deposit_request.pubkey in validator_pubkeys
+// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
+// if is_builder or (is_builder_prefix and not is_validator):
+// # Apply builder deposits immediately
+// apply_deposit_for_builder(
+// state,
+// deposit_request.pubkey,
+// deposit_request.withdrawal_credentials,
+// deposit_request.amount,
+// deposit_request.signature,
+// state.slot,
+// )
+// return
+//
+// # Add validator deposits to the queue
+// state.pending_deposits.append(
+// PendingDeposit(
+// pubkey=deposit_request.pubkey,
+// withdrawal_credentials=deposit_request.withdrawal_credentials,
+// amount=deposit_request.amount,
+// signature=deposit_request.signature,
+// slot=state.slot,
+// )
+// )
+//
+func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
+ if request == nil {
+ return errors.New("nil deposit request")
+ }
+
+ applied, err := applyBuilderDepositRequest(beaconState, request)
+ if err != nil {
+ return errors.Wrap(err, "could not apply builder deposit")
+ }
+ if applied {
+ return nil
+ }
+
+ if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
+ PublicKey: request.Pubkey,
+ WithdrawalCredentials: request.WithdrawalCredentials,
+ Amount: request.Amount,
+ Signature: request.Signature,
+ Slot: beaconState.Slot(),
+ }); err != nil {
+ return errors.Wrap(err, "could not append deposit request")
+ }
+ return nil
+}
+
+// applyBuilderDepositRequest applies a deposit request to the builder registry when the
+// pubkey already belongs to a builder, or when the withdrawal credentials carry the
+// builder prefix and the pubkey is not a known validator. It reports whether the
+// deposit was consumed here.
+//
+// Spec pseudocode:
+//
+// def apply_deposit_for_builder(
+//     state: BeaconState,
+//     pubkey: BLSPubkey,
+//     withdrawal_credentials: Bytes32,
+//     amount: uint64,
+//     signature: BLSSignature,
+//     slot: Slot,
+// ) -> None:
+//     builder_pubkeys = [b.pubkey for b in state.builders]
+//     if pubkey not in builder_pubkeys:
+//         # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
+//         if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
+//             add_builder_to_registry(state, pubkey, withdrawal_credentials, amount, slot)
+//     else:
+//         # Increase balance by deposit amount
+//         builder_index = builder_pubkeys.index(pubkey)
+//         state.builders[builder_index].balance += amount
+func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (bool, error) {
+ if beaconState.Version() < version.Gloas {
+ return false, nil
+ }
+
+ pubkey := bytesutil.ToBytes48(request.Pubkey)
+ _, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
+ idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
+ isBuilderPrefix := IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
+ if !isBuilder && (!isBuilderPrefix || isValidator) {
+ return false, nil
+ }
+
+ if isBuilder {
+ if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ if err := applyDepositForNewBuilder(
+ beaconState,
+ request.Pubkey,
+ request.WithdrawalCredentials,
+ request.Amount,
+ request.Signature,
+ ); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func applyDepositForNewBuilder(
+ beaconState state.BeaconState,
+ pubkey []byte,
+ withdrawalCredentials []byte,
+ amount uint64,
+ signature []byte,
+) error {
+ pubkeyBytes := bytesutil.ToBytes48(pubkey)
+ valid, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
+ PublicKey: pubkey,
+ WithdrawalCredentials: withdrawalCredentials,
+ Amount: amount,
+ Signature: signature,
+ })
+ if err != nil {
+ return errors.Wrap(err, "could not verify deposit signature")
+ }
+ if !valid {
+ log.WithFields(logrus.Fields{
+ "pubkey": fmt.Sprintf("%x", pubkey),
+ }).Warn("ignoring builder deposit: invalid signature")
+ return nil
+ }
+
+ withdrawalCredBytes := bytesutil.ToBytes32(withdrawalCredentials)
+ return beaconState.AddBuilderFromDeposit(pubkeyBytes, withdrawalCredBytes, amount)
+}
+
+// IsBuilderWithdrawalCredential reports whether the withdrawal credentials are 32 bytes
+// long and start with the builder withdrawal prefix byte.
+func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
+ return len(withdrawalCredentials) == fieldparams.RootLength &&
+ withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
+}
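
For illustration, a small sketch of the credential layout the builder-prefix check looks at; the example package and both helpers are hypothetical, and the execution address placement mirrors the tests below.

    package example

    import (
        "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
        "github.com/OffchainLabs/prysm/v7/config/params"
    )

    // builderCredential builds a 32-byte credential with the builder prefix in byte 0 and a
    // 20-byte execution address in bytes 12..31, the layout used for builders in this diff.
    func builderCredential(execAddr [20]byte) [32]byte {
        var cred [32]byte
        cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
        copy(cred[12:], execAddr[:])
        return cred
    }

    // hasBuilderPrefix wraps the exported check for readability in this sketch.
    func hasBuilderPrefix(cred [32]byte) bool {
        return gloas.IsBuilderWithdrawalCredential(cred[:])
    }
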
diff --git a/beacon-chain/core/gloas/deposit_request_test.go b/beacon-chain/core/gloas/deposit_request_test.go
new file mode 100644
index 0000000000..02fcf3de3c
--- /dev/null
+++ b/beacon-chain/core/gloas/deposit_request_test.go
@@ -0,0 +1,150 @@
+package gloas
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
+ stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/crypto/bls"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+)
+
+func TestProcessDepositRequests_EmptyAndNil(t *testing.T) {
+ st := newGloasState(t, nil, nil)
+
+ t.Run("empty requests continues", func(t *testing.T) {
+ err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
+ require.NoError(t, err)
+ })
+
+ t.Run("nil request errors", func(t *testing.T) {
+ err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
+ require.ErrorContains(t, "nil deposit request", err)
+ })
+}
+
+func TestProcessDepositRequest_BuilderDepositAddsBuilder(t *testing.T) {
+ sk, err := bls.RandKey()
+ require.NoError(t, err)
+
+ cred := builderWithdrawalCredentials()
+ pd := stateTesting.GeneratePendingDeposit(t, sk, 1234, cred, 0)
+ req := depositRequestFromPending(pd, 1)
+
+ st := newGloasState(t, nil, nil)
+ err = processDepositRequest(st, req)
+ require.NoError(t, err)
+
+ idx, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
+ require.Equal(t, true, ok)
+
+ builder, err := st.Builder(idx)
+ require.NoError(t, err)
+ require.NotNil(t, builder)
+ require.DeepEqual(t, req.Pubkey, builder.Pubkey)
+ require.DeepEqual(t, []byte{cred[0]}, builder.Version)
+ require.DeepEqual(t, cred[12:], builder.ExecutionAddress)
+ require.Equal(t, uint64(1234), uint64(builder.Balance))
+ require.Equal(t, params.BeaconConfig().FarFutureEpoch, builder.WithdrawableEpoch)
+
+ pending, err := st.PendingDeposits()
+ require.NoError(t, err)
+ require.Equal(t, 0, len(pending))
+}
+
+func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
+ sk, err := bls.RandKey()
+ require.NoError(t, err)
+
+ pubkey := sk.PublicKey().Marshal()
+ builders := []*ethpb.Builder{
+ {
+ Pubkey: pubkey,
+ Version: []byte{0},
+ ExecutionAddress: bytes.Repeat([]byte{0x11}, 20),
+ Balance: 5,
+ WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
+ },
+ }
+ st := newGloasState(t, nil, builders)
+
+ cred := validatorWithdrawalCredentials()
+ pd := stateTesting.GeneratePendingDeposit(t, sk, 200, cred, 0)
+ req := depositRequestFromPending(pd, 9)
+
+ err = processDepositRequest(st, req)
+ require.NoError(t, err)
+
+ idx, ok := st.BuilderIndexByPubkey(toBytes48(pubkey))
+ require.Equal(t, true, ok)
+ builder, err := st.Builder(idx)
+ require.NoError(t, err)
+ require.Equal(t, uint64(205), uint64(builder.Balance))
+
+ pending, err := st.PendingDeposits()
+ require.NoError(t, err)
+ require.Equal(t, 0, len(pending))
+}
+
+func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
+ sk, err := bls.RandKey()
+ require.NoError(t, err)
+
+ cred := builderWithdrawalCredentials()
+ st := newGloasState(t, nil, nil)
+ err = applyDepositForNewBuilder(st, sk.PublicKey().Marshal(), cred[:], 100, make([]byte, 96))
+ require.NoError(t, err)
+
+ _, ok := st.BuilderIndexByPubkey(toBytes48(sk.PublicKey().Marshal()))
+ require.Equal(t, false, ok)
+}
+
+func newGloasState(t *testing.T, validators []*ethpb.Validator, builders []*ethpb.Builder) state.BeaconState {
+ t.Helper()
+
+ st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+ DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
+ Validators: validators,
+ Balances: make([]uint64, len(validators)),
+ PendingDeposits: []*ethpb.PendingDeposit{},
+ Builders: builders,
+ })
+ require.NoError(t, err)
+
+ return st
+}
+
+func depositRequestFromPending(pd *ethpb.PendingDeposit, index uint64) *enginev1.DepositRequest {
+ return &enginev1.DepositRequest{
+ Pubkey: pd.PublicKey,
+ WithdrawalCredentials: pd.WithdrawalCredentials,
+ Amount: pd.Amount,
+ Signature: pd.Signature,
+ Index: index,
+ }
+}
+
+func builderWithdrawalCredentials() [32]byte {
+ var cred [32]byte
+ cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
+ copy(cred[12:], bytes.Repeat([]byte{0x22}, 20))
+ return cred
+}
+
+func validatorWithdrawalCredentials() [32]byte {
+ var cred [32]byte
+ cred[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
+ copy(cred[12:], bytes.Repeat([]byte{0x33}, 20))
+ return cred
+}
+
+func toBytes48(b []byte) [48]byte {
+ var out [48]byte
+ copy(out[:], b)
+ return out
+}
diff --git a/beacon-chain/core/gloas/log.go b/beacon-chain/core/gloas/log.go
new file mode 100644
index 0000000000..957b3bd47e
--- /dev/null
+++ b/beacon-chain/core/gloas/log.go
@@ -0,0 +1,9 @@
+// Code generated by hack/gen-logs.sh; DO NOT EDIT.
+// This file is created and regenerated automatically. Anything added here might get removed.
+package gloas
+
+import "github.com/sirupsen/logrus"
+
+// The prefix for logs from this package will be the text after the last slash in the package path.
+// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
+var log = logrus.WithField("package", "beacon-chain/core/gloas")
diff --git a/beacon-chain/core/gloas/payload.go b/beacon-chain/core/gloas/payload.go
new file mode 100644
index 0000000000..34a1a4c998
--- /dev/null
+++ b/beacon-chain/core/gloas/payload.go
@@ -0,0 +1,345 @@
+package gloas
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ requests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/crypto/bls"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+ "github.com/OffchainLabs/prysm/v7/time/slots"
+ "github.com/pkg/errors"
+)
+
+// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
+//
+//
+// def process_execution_payload(
+// state: BeaconState,
+// # [Modified in Gloas:EIP7732]
+// # Removed `body`
+// # [New in Gloas:EIP7732]
+// signed_envelope: SignedExecutionPayloadEnvelope,
+// execution_engine: ExecutionEngine,
+// # [New in Gloas:EIP7732]
+// verify: bool = True,
+// ) -> None:
+// envelope = signed_envelope.message
+// payload = envelope.payload
+//
+// # Verify signature
+// if verify:
+// assert verify_execution_payload_envelope_signature(state, signed_envelope)
+//
+// # Cache latest block header state root
+// previous_state_root = hash_tree_root(state)
+// if state.latest_block_header.state_root == Root():
+// state.latest_block_header.state_root = previous_state_root
+//
+// # Verify consistency with the beacon block
+// assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
+// assert envelope.slot == state.slot
+//
+// # Verify consistency with the committed bid
+// committed_bid = state.latest_execution_payload_bid
+// assert envelope.builder_index == committed_bid.builder_index
+// assert committed_bid.prev_randao == payload.prev_randao
+//
+// # Verify consistency with expected withdrawals
+// assert hash_tree_root(payload.withdrawals) == hash_tree_root(state.payload_expected_withdrawals)
+//
+// # Verify the gas_limit
+// assert committed_bid.gas_limit == payload.gas_limit
+// # Verify the block hash
+// assert committed_bid.block_hash == payload.block_hash
+// # Verify consistency of the parent hash with respect to the previous execution payload
+// assert payload.parent_hash == state.latest_block_hash
+// # Verify timestamp
+// assert payload.timestamp == compute_time_at_slot(state, state.slot)
+// # Verify the execution payload is valid
+// versioned_hashes = [
+// kzg_commitment_to_versioned_hash(commitment)
+// # [Modified in Gloas:EIP7732]
+// for commitment in committed_bid.blob_kzg_commitments
+// ]
+// requests = envelope.execution_requests
+// assert execution_engine.verify_and_notify_new_payload(
+// NewPayloadRequest(
+// execution_payload=payload,
+// versioned_hashes=versioned_hashes,
+// parent_beacon_block_root=state.latest_block_header.parent_root,
+// execution_requests=requests,
+// )
+// )
+//
+// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
+// for operation in operations:
+// fn(state, operation)
+//
+// for_ops(requests.deposits, process_deposit_request)
+// for_ops(requests.withdrawals, process_withdrawal_request)
+// for_ops(requests.consolidations, process_consolidation_request)
+//
+// # Queue the builder payment
+// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
+// amount = payment.withdrawal.amount
+// if amount > 0:
+// state.builder_pending_withdrawals.append(payment.withdrawal)
+// state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = (
+// BuilderPendingPayment()
+// )
+//
+// # Cache the execution payload hash
+// state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1
+// state.latest_block_hash = payload.block_hash
+//
+// # Verify the state root
+// if verify:
+// assert envelope.state_root == hash_tree_root(state)
+//
+func ProcessExecutionPayload(
+ ctx context.Context,
+ st state.BeaconState,
+ signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
+) error {
+ if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
+ return errors.Wrap(err, "signature verification failed")
+ }
+
+ latestHeader := st.LatestBlockHeader()
+ if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
+ previousStateRoot, err := st.HashTreeRoot(ctx)
+ if err != nil {
+ return errors.Wrap(err, "could not compute state root")
+ }
+ latestHeader.StateRoot = previousStateRoot[:]
+ if err := st.SetLatestBlockHeader(latestHeader); err != nil {
+ return errors.Wrap(err, "could not set latest block header")
+ }
+ }
+
+ blockHeaderRoot, err := latestHeader.HashTreeRoot()
+ if err != nil {
+ return errors.Wrap(err, "could not compute block header root")
+ }
+ envelope, err := signedEnvelope.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "could not get envelope from signed envelope")
+ }
+
+ beaconBlockRoot := envelope.BeaconBlockRoot()
+ if !bytes.Equal(beaconBlockRoot[:], blockHeaderRoot[:]) {
+ return errors.Errorf("envelope beacon block root does not match state latest block header root: envelope=%#x, header=%#x", beaconBlockRoot, blockHeaderRoot)
+ }
+
+ if envelope.Slot() != st.Slot() {
+ return errors.Errorf("envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
+ }
+
+ latestBid, err := st.LatestExecutionPayloadBid()
+ if err != nil {
+ return errors.Wrap(err, "could not get latest execution payload bid")
+ }
+ if latestBid == nil {
+ return errors.New("latest execution payload bid is nil")
+ }
+ if envelope.BuilderIndex() != latestBid.BuilderIndex() {
+ return errors.Errorf("envelope builder index does not match committed bid builder index: envelope=%d, bid=%d", envelope.BuilderIndex(), latestBid.BuilderIndex())
+ }
+
+ payload, err := envelope.Execution()
+ if err != nil {
+ return errors.Wrap(err, "could not get execution payload from envelope")
+ }
+ latestBidPrevRandao := latestBid.PrevRandao()
+ if !bytes.Equal(payload.PrevRandao(), latestBidPrevRandao[:]) {
+ return errors.Errorf("payload prev randao does not match committed bid prev randao: payload=%#x, bid=%#x", payload.PrevRandao(), latestBidPrevRandao)
+ }
+
+ withdrawals, err := payload.Withdrawals()
+ if err != nil {
+ return errors.Wrap(err, "could not get withdrawals from payload")
+ }
+
+ ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
+ if err != nil {
+ return errors.Wrap(err, "could not validate payload withdrawals")
+ }
+ if !ok {
+ return errors.New("payload withdrawals do not match expected withdrawals")
+ }
+
+ if latestBid.GasLimit() != payload.GasLimit() {
+ return errors.Errorf("committed bid gas limit does not match payload gas limit: bid=%d, payload=%d", latestBid.GasLimit(), payload.GasLimit())
+ }
+
+ bidBlockHash := latestBid.BlockHash()
+ payloadBlockHash := payload.BlockHash()
+ if !bytes.Equal(bidBlockHash[:], payloadBlockHash) {
+ return errors.Errorf("committed bid block hash does not match payload block hash: bid=%#x, payload=%#x", bidBlockHash, payloadBlockHash)
+ }
+
+ latestBlockHash, err := st.LatestBlockHash()
+ if err != nil {
+ return errors.Wrap(err, "could not get latest block hash")
+ }
+ if !bytes.Equal(payload.ParentHash(), latestBlockHash[:]) {
+ return errors.Errorf("payload parent hash does not match state latest block hash: payload=%#x, state=%#x", payload.ParentHash(), latestBlockHash)
+ }
+
+ t, err := slots.StartTime(st.GenesisTime(), st.Slot())
+ if err != nil {
+ return errors.Wrap(err, "could not compute timestamp")
+ }
+ if payload.Timestamp() != uint64(t.Unix()) {
+ return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
+ }
+
+ if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
+ return errors.Wrap(err, "could not process execution requests")
+ }
+
+ if err := st.QueueBuilderPayment(); err != nil {
+ return errors.Wrap(err, "could not queue builder payment")
+ }
+
+ if err := st.SetExecutionPayloadAvailability(st.Slot(), true); err != nil {
+ return errors.Wrap(err, "could not set execution payload availability")
+ }
+
+ if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
+ return errors.Wrap(err, "could not set latest block hash")
+ }
+
+ r, err := st.HashTreeRoot(ctx)
+ if err != nil {
+ return errors.Wrap(err, "could not get hash tree root")
+ }
+ if r != envelope.StateRoot() {
+ return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
+ }
+
+ return nil
+}
+
+func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
+ if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
+ return proposerPublicKey(st)
+ }
+ return builderPublicKey(st, builderIdx)
+}
+
+func proposerPublicKey(st state.BeaconState) (bls.PublicKey, error) {
+ header := st.LatestBlockHeader()
+ if header == nil {
+ return nil, fmt.Errorf("latest block header is nil")
+ }
+ proposerPubkey := st.PubkeyAtIndex(header.ProposerIndex)
+ publicKey, err := bls.PublicKeyFromBytes(proposerPubkey[:])
+ if err != nil {
+ return nil, fmt.Errorf("invalid proposer public key: %w", err)
+ }
+ return publicKey, nil
+}
+
+func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
+ builder, err := st.Builder(builderIdx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get builder: %w", err)
+ }
+ if builder == nil {
+ return nil, fmt.Errorf("builder at index %d not found", builderIdx)
+ }
+ publicKey, err := bls.PublicKeyFromBytes(builder.Pubkey)
+ if err != nil {
+ return nil, fmt.Errorf("invalid builder public key: %w", err)
+ }
+ return publicKey, nil
+}
+
+// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
+// Spec v1.7.0-alpha.0 (pseudocode):
+// for op in requests.deposits: process_deposit_request(state, op)
+// for op in requests.withdrawals: process_withdrawal_request(state, op)
+// for op in requests.consolidations: process_consolidation_request(state, op)
+func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
+ if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
+ return errors.Wrap(err, "could not process deposit requests")
+ }
+
+ var err error
+ st, err = requests.ProcessWithdrawalRequests(ctx, st, rqs.Withdrawals)
+ if err != nil {
+ return errors.Wrap(err, "could not process withdrawal requests")
+ }
+ err = requests.ProcessConsolidationRequests(ctx, st, rqs.Consolidations)
+ if err != nil {
+ return errors.Wrap(err, "could not process consolidation requests")
+ }
+ return nil
+}
+
+// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
+//
+// Spec v1.7.0-alpha.0 (pseudocode):
+//
+// builder_index = signed_envelope.message.builder_index
+// if builder_index == BUILDER_INDEX_SELF_BUILD:
+//     validator_index = state.latest_block_header.proposer_index
+//     pubkey = state.validators[validator_index].pubkey
+// else:
+//     pubkey = state.builders[builder_index].pubkey
+// signing_root = compute_signing_root(
+//     signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
+// )
+// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
+func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
+ envelope, err := signedEnvelope.Envelope()
+ if err != nil {
+ return fmt.Errorf("failed to get envelope: %w", err)
+ }
+
+ builderIdx := envelope.BuilderIndex()
+ publicKey, err := envelopePublicKey(st, builderIdx)
+ if err != nil {
+ return err
+ }
+
+ signatureBytes := signedEnvelope.Signature()
+ signature, err := bls.SignatureFromBytes(signatureBytes[:])
+ if err != nil {
+ return fmt.Errorf("invalid signature format: %w", err)
+ }
+
+ currentEpoch := slots.ToEpoch(envelope.Slot())
+ domain, err := signing.Domain(
+ st.Fork(),
+ currentEpoch,
+ params.BeaconConfig().DomainBeaconBuilder,
+ st.GenesisValidatorsRoot(),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to compute signing domain: %w", err)
+ }
+
+ signingRoot, err := signedEnvelope.SigningRoot(domain)
+ if err != nil {
+ return fmt.Errorf("failed to compute signing root: %w", err)
+ }
+
+ if !signature.Verify(publicKey, signingRoot[:]) {
+ return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
+ }
+
+ return nil
+}
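
For illustration, a sketch of how a caller might apply a validated envelope to a copy of a Gloas state; the example package and the applyEnvelope helper are hypothetical, and wiring this into ReceiveExecutionPayloadEnvelope is left as a TODO earlier in this diff.

    package example

    import (
        "context"

        "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
        "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
        "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
        "github.com/pkg/errors"
    )

    // applyEnvelope runs the Gloas execution payload transition on a copy of the given state
    // and returns the updated copy, leaving the input state untouched on error.
    func applyEnvelope(ctx context.Context, st state.BeaconState, signed interfaces.ROSignedExecutionPayloadEnvelope) (state.BeaconState, error) {
        post := st.Copy()
        if err := gloas.ProcessExecutionPayload(ctx, post, signed); err != nil {
            return nil, errors.Wrap(err, "could not process execution payload envelope")
        }
        return post, nil
    }
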
diff --git a/beacon-chain/core/gloas/payload_test.go b/beacon-chain/core/gloas/payload_test.go
new file mode 100644
index 0000000000..8a727f66a3
--- /dev/null
+++ b/beacon-chain/core/gloas/payload_test.go
@@ -0,0 +1,349 @@
+package gloas
+
+import (
+ "bytes"
+ "context"
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/crypto/bls"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+ "github.com/OffchainLabs/prysm/v7/time/slots"
+ "google.golang.org/protobuf/proto"
+)
+
+type payloadFixture struct {
+ state state.BeaconState
+ signed interfaces.ROSignedExecutionPayloadEnvelope
+ signedProto *ethpb.SignedExecutionPayloadEnvelope
+ envelope *ethpb.ExecutionPayloadEnvelope
+ payload *enginev1.ExecutionPayloadDeneb
+ slot primitives.Slot
+}
+
+func buildPayloadFixture(t *testing.T, mutate func(payload *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, envelope *ethpb.ExecutionPayloadEnvelope)) payloadFixture {
+ t.Helper()
+
+ cfg := params.BeaconConfig()
+ slot := primitives.Slot(5)
+ builderIdx := primitives.BuilderIndex(0)
+
+ sk, err := bls.RandKey()
+ require.NoError(t, err)
+ pk := sk.PublicKey().Marshal()
+
+ randao := bytes.Repeat([]byte{0xAA}, 32)
+ parentHash := bytes.Repeat([]byte{0xBB}, 32)
+ blockHash := bytes.Repeat([]byte{0xCC}, 32)
+
+ withdrawals := []*enginev1.Withdrawal{
+ {Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 0},
+ }
+
+ payload := &enginev1.ExecutionPayloadDeneb{
+ ParentHash: parentHash,
+ FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
+ StateRoot: bytes.Repeat([]byte{0x02}, 32),
+ ReceiptsRoot: bytes.Repeat([]byte{0x03}, 32),
+ LogsBloom: bytes.Repeat([]byte{0x04}, 256),
+ PrevRandao: randao,
+ BlockNumber: 1,
+ GasLimit: 1,
+ GasUsed: 0,
+ Timestamp: 100,
+ ExtraData: []byte{},
+ BaseFeePerGas: bytes.Repeat([]byte{0x05}, 32),
+ BlockHash: blockHash,
+ Transactions: [][]byte{},
+ Withdrawals: withdrawals,
+ BlobGasUsed: 0,
+ ExcessBlobGas: 0,
+ }
+
+ bid := &ethpb.ExecutionPayloadBid{
+ ParentBlockHash: parentHash,
+ ParentBlockRoot: bytes.Repeat([]byte{0xDD}, 32),
+ BlockHash: blockHash,
+ PrevRandao: randao,
+ GasLimit: 1,
+ BuilderIndex: builderIdx,
+ Slot: slot,
+ Value: 0,
+ ExecutionPayment: 0,
+ FeeRecipient: bytes.Repeat([]byte{0xEE}, 20),
+ }
+
+ header := &ethpb.BeaconBlockHeader{
+ Slot: slot,
+ ParentRoot: bytes.Repeat([]byte{0x11}, 32),
+ StateRoot: bytes.Repeat([]byte{0x22}, 32),
+ BodyRoot: bytes.Repeat([]byte{0x33}, 32),
+ }
+ headerRoot, err := header.HashTreeRoot()
+ require.NoError(t, err)
+
+ envelope := &ethpb.ExecutionPayloadEnvelope{
+ Slot: slot,
+ BuilderIndex: builderIdx,
+ BeaconBlockRoot: headerRoot[:],
+ Payload: payload,
+ ExecutionRequests: &enginev1.ExecutionRequests{},
+ }
+
+ if mutate != nil {
+ mutate(payload, bid, envelope)
+ }
+
+ genesisRoot := bytes.Repeat([]byte{0xAB}, 32)
+ blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ for i := range blockRoots {
+ blockRoots[i] = bytes.Repeat([]byte{0x44}, 32)
+ stateRoots[i] = bytes.Repeat([]byte{0x55}, 32)
+ }
+ randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
+ for i := range randaoMixes {
+ randaoMixes[i] = randao
+ }
+
+ withdrawalCreds := make([]byte, 32)
+ withdrawalCreds[0] = cfg.ETH1AddressWithdrawalPrefixByte
+
+ eth1Data := ðpb.Eth1Data{
+ DepositRoot: bytes.Repeat([]byte{0x66}, 32),
+ DepositCount: 0,
+ BlockHash: bytes.Repeat([]byte{0x77}, 32),
+ }
+
+ vals := []*ethpb.Validator{
+ {
+ PublicKey: pk,
+ WithdrawalCredentials: withdrawalCreds,
+ EffectiveBalance: cfg.MinActivationBalance + 1_000,
+ },
+ }
+ balances := []uint64{cfg.MinActivationBalance + 1_000}
+
+ payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
+ for i := range payments {
+ payments[i] = ðpb.BuilderPendingPayment{
+ Withdrawal: ðpb.BuilderPendingWithdrawal{
+ FeeRecipient: make([]byte, 20),
+ },
+ }
+ }
+
+ executionPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
+
+ builders := make([]*ethpb.Builder, builderIdx+1)
+ builders[builderIdx] = ðpb.Builder{
+ Pubkey: pk,
+ Version: []byte{0},
+ ExecutionAddress: bytes.Repeat([]byte{0x09}, 20),
+ Balance: 0,
+ DepositEpoch: 0,
+ WithdrawableEpoch: 0,
+ }
+
+ genesisTime := uint64(0)
+ slotSeconds := cfg.SecondsPerSlot * uint64(slot)
+ if payload.Timestamp > slotSeconds {
+ genesisTime = payload.Timestamp - slotSeconds
+ }
+
+ stProto := ðpb.BeaconStateGloas{
+ Slot: slot,
+ GenesisTime: genesisTime,
+ GenesisValidatorsRoot: genesisRoot,
+ Fork: ðpb.Fork{
+ CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
+ PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
+ Epoch: 0,
+ },
+ LatestBlockHeader: header,
+ BlockRoots: blockRoots,
+ StateRoots: stateRoots,
+ RandaoMixes: randaoMixes,
+ Eth1Data: eth1Data,
+ Validators: vals,
+ Balances: balances,
+ LatestBlockHash: payload.ParentHash,
+ LatestExecutionPayloadBid: bid,
+ BuilderPendingPayments: payments,
+ ExecutionPayloadAvailability: executionPayloadAvailability,
+ BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
+ PayloadExpectedWithdrawals: payload.Withdrawals,
+ Builders: builders,
+ }
+
+ st, err := state_native.InitializeFromProtoGloas(stProto)
+ require.NoError(t, err)
+
+ expected := st.Copy()
+ ctx := context.Background()
+ require.NoError(t, processExecutionRequests(ctx, expected, envelope.ExecutionRequests))
+ require.NoError(t, expected.QueueBuilderPayment())
+ require.NoError(t, expected.SetExecutionPayloadAvailability(slot, true))
+ var blockHashArr [32]byte
+ copy(blockHashArr[:], payload.BlockHash)
+ require.NoError(t, expected.SetLatestBlockHash(blockHashArr))
+ expectedRoot, err := expected.HashTreeRoot(ctx)
+ require.NoError(t, err)
+ envelope.StateRoot = expectedRoot[:]
+
+ epoch := slots.ToEpoch(slot)
+ domain, err := signing.Domain(st.Fork(), epoch, cfg.DomainBeaconBuilder, st.GenesisValidatorsRoot())
+ require.NoError(t, err)
+ signingRoot, err := signing.ComputeSigningRoot(envelope, domain)
+ require.NoError(t, err)
+ signature := sk.Sign(signingRoot[:]).Marshal()
+
+ signedProto := ðpb.SignedExecutionPayloadEnvelope{
+ Message: envelope,
+ Signature: signature,
+ }
+ signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
+ require.NoError(t, err)
+
+ return payloadFixture{
+ state: st,
+ signed: signed,
+ signedProto: signedProto,
+ envelope: envelope,
+ payload: payload,
+ slot: slot,
+ }
+}
+
+func TestProcessExecutionPayload_Success(t *testing.T) {
+ fixture := buildPayloadFixture(t, nil)
+ require.NoError(t, ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed))
+
+ latestHash, err := fixture.state.LatestBlockHash()
+ require.NoError(t, err)
+ var expectedHash [32]byte
+ copy(expectedHash[:], fixture.payload.BlockHash)
+ require.Equal(t, expectedHash, latestHash)
+
+ slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+ paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
+ payments, err := fixture.state.BuilderPendingPayments()
+ require.NoError(t, err)
+ payment := payments[paymentIndex]
+ require.NotNil(t, payment)
+ require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
+}
+
+func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
+ fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
+ bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
+ })
+
+ err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
+ require.ErrorContains(t, "prev randao", err)
+}
+
+func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
+ fixture := buildPayloadFixture(t, nil)
+
+ require.NoError(t, fixture.state.QueueBuilderPayment())
+
+ slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+ paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
+ payments, err := fixture.state.BuilderPendingPayments()
+ require.NoError(t, err)
+ payment := payments[paymentIndex]
+ require.NotNil(t, payment)
+ require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
+}
+
+func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
+ fixture := buildPayloadFixture(t, nil)
+
+ t.Run("self build", func(t *testing.T) {
+ proposerSk, err := bls.RandKey()
+ require.NoError(t, err)
+ proposerPk := proposerSk.PublicKey().Marshal()
+
+ stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
+ require.Equal(t, true, ok)
+ stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
+ stPb.Validators[0].PublicKey = proposerPk
+ st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
+ require.NoError(t, err)
+
+ msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
+ msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
+
+ epoch := slots.ToEpoch(msg.Slot)
+ domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
+ require.NoError(t, err)
+ signingRoot, err := signing.ComputeSigningRoot(msg, domain)
+ require.NoError(t, err)
+ signature := proposerSk.Sign(signingRoot[:]).Marshal()
+
+ signedProto := ðpb.SignedExecutionPayloadEnvelope{
+ Message: msg,
+ Signature: signature,
+ }
+ signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
+ require.NoError(t, err)
+
+ require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
+ })
+
+ t.Run("builder", func(t *testing.T) {
+ signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
+ require.NoError(t, err)
+
+ require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
+ })
+
+ t.Run("invalid signature", func(t *testing.T) {
+ t.Run("self build", func(t *testing.T) {
+ proposerSk, err := bls.RandKey()
+ require.NoError(t, err)
+ proposerPk := proposerSk.PublicKey().Marshal()
+
+ stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
+ require.Equal(t, true, ok)
+ stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
+ stPb.Validators[0].PublicKey = proposerPk
+ st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
+ require.NoError(t, err)
+
+ msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
+ msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
+
+ signedProto := ðpb.SignedExecutionPayloadEnvelope{
+ Message: msg,
+ Signature: bytes.Repeat([]byte{0xFF}, 96),
+ }
+ badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
+ require.NoError(t, err)
+
+ err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
+ require.ErrorContains(t, "invalid signature format", err)
+ })
+
+ t.Run("builder", func(t *testing.T) {
+ signedProto := ðpb.SignedExecutionPayloadEnvelope{
+ Message: fixture.signedProto.Message,
+ Signature: bytes.Repeat([]byte{0xFF}, 96),
+ }
+ badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
+ require.NoError(t, err)
+
+ err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
+ require.ErrorContains(t, "invalid signature format", err)
+ })
+ })
+}
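
The success and queue tests above both locate the pending builder payment at index slotsPerEpoch + slot%slotsPerEpoch: the BuilderPendingPayments vector spans two epochs and the payment for the current slot lives in the second window. A standalone sketch of just that arithmetic, assuming mainnet's 32 slots per epoch (the tests themselves read the value from params.BeaconConfig()):

package main

import "fmt"

// pendingPaymentIndex mirrors the index used in the tests above: the
// BuilderPendingPayments vector covers two epochs, and the payment for the
// current slot is stored in the second epoch's window.
func pendingPaymentIndex(slot, slotsPerEpoch uint64) uint64 {
	return slotsPerEpoch + slot%slotsPerEpoch
}

func main() {
	// With 32 slots per epoch and the fixture's slot 5, the payment sits at index 37.
	fmt.Println(pendingPaymentIndex(5, 32))
}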
diff --git a/beacon-chain/execution/BUILD.bazel b/beacon-chain/execution/BUILD.bazel
index 4d4ce47689..1f0d20c465 100644
--- a/beacon-chain/execution/BUILD.bazel
+++ b/beacon-chain/execution/BUILD.bazel
@@ -8,6 +8,7 @@ go_library(
"deposit.go",
"engine_client.go",
"errors.go",
+ "graffiti_info.go",
"log.go",
"log_processing.go",
"metrics.go",
@@ -89,6 +90,7 @@ go_test(
"engine_client_fuzz_test.go",
"engine_client_test.go",
"execution_chain_test.go",
+ "graffiti_info_test.go",
"init_test.go",
"log_processing_test.go",
"mock_test.go",
diff --git a/beacon-chain/execution/engine_client.go b/beacon-chain/execution/engine_client.go
index 82eb5ff634..7d35c59508 100644
--- a/beacon-chain/execution/engine_client.go
+++ b/beacon-chain/execution/engine_client.go
@@ -61,7 +61,17 @@ var (
}
)
+// ClientVersionV1 represents the response from engine_getClientVersionV1.
+type ClientVersionV1 struct {
+ Code string `json:"code"`
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Commit string `json:"commit"`
+}
+
const (
+ // GetClientVersionMethod is the engine_getClientVersionV1 method for JSON-RPC.
+ GetClientVersionMethod = "engine_getClientVersionV1"
// NewPayloadMethod v1 request string for JSON-RPC.
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
@@ -350,6 +360,24 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
return elSupportedEndpointsSlice, nil
}
+// GetClientVersion calls engine_getClientVersionV1 to retrieve EL client information.
+func (s *Service) GetClientVersion(ctx context.Context) ([]ClientVersionV1, error) {
+ ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetClientVersion")
+ defer span.End()
+
+ // Per spec, we send our own client info as the parameter
+ clVersion := ClientVersionV1{
+ Code: CLCode,
+ Name: Name,
+ Version: version.SemanticVersion(),
+ Commit: version.GetCommitPrefix(),
+ }
+
+ var result []ClientVersionV1
+ err := s.rpcClient.CallContext(ctx, &result, GetClientVersionMethod, clVersion)
+ return result, handleRPCError(err)
+}
+
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
//
// Spec code:
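
For reference, a hedged sketch of the wire shapes involved in engine_getClientVersionV1: the CL sends its own ClientVersionV1 as the single parameter and the EL answers with a list of versions. The struct below copies the JSON tags added above; the concrete version strings are illustrative only, not Prysm's or any EL's real output.

package main

import (
	"encoding/json"
	"fmt"
)

// ClientVersionV1 mirrors the struct added in engine_client.go; the JSON tags
// define the wire format of engine_getClientVersionV1 parameters and results.
type ClientVersionV1 struct {
	Code    string `json:"code"`
	Name    string `json:"name"`
	Version string `json:"version"`
	Commit  string `json:"commit"`
}

func main() {
	// The CL sends its own identity as the single parameter (values here are
	// placeholders, not Prysm's actual version strings).
	param := ClientVersionV1{Code: "PR", Name: "Prysm", Version: "v7.0.0", Commit: "e4f6aabb"}
	b, _ := json.Marshal([]ClientVersionV1{param})
	fmt.Println(string(b))

	// A typical response is a one-element list describing the connected EL.
	resp := `[{"code":"GE","name":"Geth","version":"v1.14.0","commit":"abcd1234"}]`
	var versions []ClientVersionV1
	if err := json.Unmarshal([]byte(resp), &versions); err == nil {
		fmt.Println(versions[0].Code, versions[0].Commit)
	}
}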
diff --git a/beacon-chain/execution/graffiti_info.go b/beacon-chain/execution/graffiti_info.go
new file mode 100644
index 0000000000..2b12693e12
--- /dev/null
+++ b/beacon-chain/execution/graffiti_info.go
@@ -0,0 +1,134 @@
+package execution
+
+import (
+ "strings"
+ "sync"
+
+ "github.com/OffchainLabs/prysm/v7/runtime/version"
+)
+
+const (
+ // CLCode is the two-letter client code for Prysm.
+ CLCode = "PR"
+ Name = "Prysm"
+)
+
+// GraffitiInfo holds version information for generating block graffiti.
+// It is thread-safe and can be updated by the execution service and read by the validator server.
+type GraffitiInfo struct {
+ mu sync.RWMutex
+ elCode string // From engine_getClientVersionV1
+ elCommit string // From engine_getClientVersionV1
+ logOnce sync.Once
+}
+
+// NewGraffitiInfo creates a new GraffitiInfo.
+func NewGraffitiInfo() *GraffitiInfo {
+ return &GraffitiInfo{}
+}
+
+// UpdateFromEngine updates the EL client information.
+func (g *GraffitiInfo) UpdateFromEngine(code, commit string) {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ g.elCode = code
+ g.elCommit = strings.TrimPrefix(commit, "0x")
+}
+
+// GenerateGraffiti generates graffiti using the flexible standard
+// with the provided user graffiti from the validator client request.
+// It places user graffiti first, then appends as much client info as space allows.
+//
+// A space separator is added between user graffiti and client info when it
+// fits without reducing the client version tier.
+//
+// Available Space | Format
+// ≥13 bytes | user + space + EL(2)+commit(4)+CL(2)+commit(4) e.g. "Sushi GEabcdPRe4f6"
+// 12 bytes | user + EL(2)+commit(4)+CL(2)+commit(4) e.g. "12345678901234567890GEabcdPRe4f6"
+// 9-11 bytes | user + space + EL(2)+commit(2)+CL(2)+commit(2) e.g. "12345678901234567890123 GEabPRe4"
+// 8 bytes | user + EL(2)+commit(2)+CL(2)+commit(2) e.g. "123456789012345678901234GEabPRe4"
+// 5-7 bytes | user + space + EL(2)+CL(2) e.g. "123456789012345678901234567 GEPR"
+// 4 bytes | user + EL(2)+CL(2) e.g. "1234567890123456789012345678GEPR"
+// 3 bytes | user + space + code(2) e.g. "12345678901234567890123456789 GE"
+// 2 bytes | user + code(2) e.g. "123456789012345678901234567890GE"
+// <2 bytes | user only e.g. "1234567890123456789012345678901x"
+func (g *GraffitiInfo) GenerateGraffiti(userGraffiti []byte) [32]byte {
+ g.mu.RLock()
+ defer g.mu.RUnlock()
+
+ var result [32]byte
+ userStr := string(userGraffiti)
+ // Trim trailing null bytes
+ for len(userStr) > 0 && userStr[len(userStr)-1] == 0 {
+ userStr = userStr[:len(userStr)-1]
+ }
+
+ available := 32 - len(userStr)
+
+ clCommit := version.GetCommitPrefix()
+ clCommit4 := truncateCommit(clCommit, 4)
+ clCommit2 := truncateCommit(clCommit, 2)
+
+ // If there is no EL info, leave the EL commit strings empty but still include CL info
+ var elCommit4, elCommit2 string
+ if g.elCode != "" {
+ elCommit4 = truncateCommit(g.elCommit, 4)
+ elCommit2 = truncateCommit(g.elCommit, 2)
+ }
+
+ // Add a space separator between user graffiti and client info,
+ // but only if it won't reduce the space available for client version info.
+ space := func(minForTier int) string {
+ if len(userStr) > 0 && available >= minForTier+1 {
+ return " "
+ }
+ return ""
+ }
+
+ var graffiti string
+ switch {
+ case available >= 12:
+ // Full: user+EL(2)+commit(4)+CL(2)+commit(4)
+ graffiti = userStr + space(12) + g.elCode + elCommit4 + CLCode + clCommit4
+ case available >= 8:
+ // Reduced commits: user+EL(2)+commit(2)+CL(2)+commit(2)
+ graffiti = userStr + space(8) + g.elCode + elCommit2 + CLCode + clCommit2
+ case available >= 4:
+ // Codes only: user+EL(2)+CL(2)
+ graffiti = userStr + space(4) + g.elCode + CLCode
+ case available >= 2:
+ // Single code: user+code(2)
+ if g.elCode != "" {
+ graffiti = userStr + space(2) + g.elCode
+ } else {
+ graffiti = userStr + space(2) + CLCode
+ }
+ default:
+ // User graffiti only
+ graffiti = userStr
+ }
+
+ g.logOnce.Do(func() {
+ logGraffitiInfo(graffiti, available)
+ })
+
+ copy(result[:], graffiti)
+ return result
+}
+
+// logGraffitiInfo logs the graffiti that will be used.
+func logGraffitiInfo(graffiti string, available int) {
+ if available >= 2 {
+ log.WithField("graffiti", graffiti).Info("Graffiti includes client version info appended after user graffiti")
+ return
+ }
+ log.WithField("graffiti", graffiti).Info("Prysm appends consensus and execution client debugging information to the graffiti field when space allows. To keep this debugging info, please consider using a shorter graffiti")
+}
+
+// truncateCommit returns the first n characters of the commit string.
+func truncateCommit(commit string, n int) string {
+ if len(commit) <= n {
+ return commit
+ }
+ return commit[:n]
+}
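
A small usage sketch of the tiering above, assuming the exported GraffitiInfo API lands exactly as in this file; the CL commit suffix in the output depends on the build (it comes from version.GetCommitPrefix()), so only the shape of the result is predictable here.

package main

import (
	"bytes"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
)

func main() {
	g := execution.NewGraffitiInfo()
	g.UpdateFromEngine("GE", "0xabcd1234") // the "0x" prefix is stripped before truncation

	// Plenty of room: user graffiti, a separating space, then EL(2)+commit(4)+CL(2)+commit(4),
	// e.g. "Sushi GEabcdPRxxxx" where xxxx is Prysm's own commit prefix.
	full := g.GenerateGraffiti([]byte("Sushi"))
	fmt.Println(string(bytes.TrimRight(full[:], "\x00")))

	// Only two bytes left: the EL code alone is appended, with no separator.
	tight := g.GenerateGraffiti(bytes.Repeat([]byte{'x'}, 30))
	fmt.Println(string(bytes.TrimRight(tight[:], "\x00")))
}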
diff --git a/beacon-chain/execution/graffiti_info_test.go b/beacon-chain/execution/graffiti_info_test.go
new file mode 100644
index 0000000000..1dcdcd9c75
--- /dev/null
+++ b/beacon-chain/execution/graffiti_info_test.go
@@ -0,0 +1,250 @@
+package execution
+
+import (
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+)
+
+func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
+ tests := []struct {
+ name string
+ elCode string
+ elCommit string
+ userGraffiti []byte
+ wantPrefix string // user graffiti appears first
+ wantSuffix string // client version info appended after
+ }{
+ // No EL info cases (CL info "PR" + commit still included when space allows)
+ {
+ name: "No EL - empty user graffiti",
+ elCode: "",
+ elCommit: "",
+ userGraffiti: []byte{},
+ wantPrefix: "PR", // Only CL code + commit (no user graffiti to prefix)
+ },
+ {
+ name: "No EL - short user graffiti",
+ elCode: "",
+ elCommit: "",
+ userGraffiti: []byte("my validator"),
+ wantPrefix: "my validator",
+ wantSuffix: " PR", // space + CL code appended
+ },
+ {
+ name: "No EL - 28 char user graffiti (4 bytes available)",
+ elCode: "",
+ elCommit: "",
+ userGraffiti: []byte("1234567890123456789012345678"), // 28 chars, 4 bytes available = codes only
+ wantPrefix: "1234567890123456789012345678",
+ wantSuffix: "PR", // CL code (no EL, so just PR)
+ },
+ {
+ name: "No EL - 30 char user graffiti (2 bytes available)",
+ elCode: "",
+ elCommit: "",
+ userGraffiti: []byte("123456789012345678901234567890"), // 30 chars, 2 bytes available = fits PR
+ wantPrefix: "123456789012345678901234567890",
+ wantSuffix: "PR",
+ },
+ {
+ name: "No EL - 31 char user graffiti (1 byte available)",
+ elCode: "",
+ elCommit: "",
+ userGraffiti: []byte("1234567890123456789012345678901"), // 31 chars, 1 byte available = not enough for code
+ wantPrefix: "1234567890123456789012345678901", // User only
+ },
+ {
+ name: "No EL - 32 char user graffiti (0 bytes available)",
+ elCode: "",
+ elCommit: "",
+ userGraffiti: []byte("12345678901234567890123456789012"),
+ wantPrefix: "12345678901234567890123456789012", // User only
+ },
+ // With EL info - flexible standard format cases
+ {
+ name: "With EL - full format (empty user graffiti)",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte{},
+ wantPrefix: "GEabcdPR", // No user graffiti, starts with client info
+ },
+ {
+ name: "With EL - full format (short user graffiti)",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("Bob"),
+ wantPrefix: "Bob",
+ wantSuffix: " GEabcdPR", // space + EL(2)+commit(4)+CL(2)+commit(4)
+ },
+ {
+ name: "With EL - full format (20 char user, 12 bytes available) - no space, would reduce tier",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("12345678901234567890"), // 20 chars, leaves exactly 12 bytes = full format, no room for space
+ wantPrefix: "12345678901234567890",
+ wantSuffix: "GEabcdPR",
+ },
+ {
+ name: "With EL - full format (19 char user, 13 bytes available) - space fits",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("1234567890123456789"), // 19 chars, leaves 13 bytes = full format + space
+ wantPrefix: "1234567890123456789",
+ wantSuffix: " GEabcdPR",
+ },
+ {
+ name: "With EL - reduced commits (24 char user, 8 bytes available) - no space, would reduce tier",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("123456789012345678901234"), // 24 chars, leaves exactly 8 bytes = reduced format, no room for space
+ wantPrefix: "123456789012345678901234",
+ wantSuffix: "GEabPR",
+ },
+ {
+ name: "With EL - reduced commits (23 char user, 9 bytes available) - space fits",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("12345678901234567890123"), // 23 chars, leaves 9 bytes = reduced format + space
+ wantPrefix: "12345678901234567890123",
+ wantSuffix: " GEabPR",
+ },
+ {
+ name: "With EL - codes only (28 char user, 4 bytes available) - no space, would reduce tier",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("1234567890123456789012345678"), // 28 chars, leaves exactly 4 bytes = codes only, no room for space
+ wantPrefix: "1234567890123456789012345678",
+ wantSuffix: "GEPR",
+ },
+ {
+ name: "With EL - codes only (27 char user, 5 bytes available) - space fits",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("123456789012345678901234567"), // 27 chars, leaves 5 bytes = codes only + space
+ wantPrefix: "123456789012345678901234567",
+ wantSuffix: " GEPR",
+ },
+ {
+ name: "With EL - EL code only (30 char user, 2 bytes available) - no space, would reduce tier",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("123456789012345678901234567890"), // 30 chars, leaves exactly 2 bytes = EL code only, no room for space
+ wantPrefix: "123456789012345678901234567890",
+ wantSuffix: "GE",
+ },
+ {
+ name: "With EL - EL code only (29 char user, 3 bytes available) - space fits",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("12345678901234567890123456789"), // 29 chars, leaves 3 bytes = EL code + space
+ wantPrefix: "12345678901234567890123456789",
+ wantSuffix: " GE",
+ },
+ {
+ name: "With EL - user only (31 char user, 1 byte available)",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("1234567890123456789012345678901"), // 31 chars, leaves 1 byte = not enough for code
+ wantPrefix: "1234567890123456789012345678901", // User only
+ },
+ {
+ name: "With EL - user only (32 char user, 0 bytes available)",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: []byte("12345678901234567890123456789012"),
+ wantPrefix: "12345678901234567890123456789012",
+ },
+ // Null byte handling
+ {
+ name: "Null bytes - input with trailing nulls",
+ elCode: "GE",
+ elCommit: "abcd1234",
+ userGraffiti: append([]byte("test"), 0, 0, 0),
+ wantPrefix: "test",
+ wantSuffix: " GEabcdPR",
+ },
+ // 0x prefix handling - some ELs return 0x-prefixed commits
+ {
+ name: "0x prefix - stripped from EL commit",
+ elCode: "GE",
+ elCommit: "0xabcd1234",
+ userGraffiti: []byte{},
+ wantPrefix: "GEabcdPR",
+ },
+ {
+ name: "No 0x prefix - commit used as-is",
+ elCode: "NM",
+ elCommit: "abcd1234",
+ userGraffiti: []byte{},
+ wantPrefix: "NMabcdPR",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ g := NewGraffitiInfo()
+ if tt.elCode != "" {
+ g.UpdateFromEngine(tt.elCode, tt.elCommit)
+ }
+
+ result := g.GenerateGraffiti(tt.userGraffiti)
+ resultStr := string(result[:])
+ trimmed := trimNullBytes(resultStr)
+
+ // Check prefix (user graffiti comes first)
+ require.Equal(t, true, len(trimmed) >= len(tt.wantPrefix), "Result too short for prefix check")
+ require.Equal(t, tt.wantPrefix, trimmed[:len(tt.wantPrefix)], "Prefix mismatch")
+
+ // Check suffix if specified (client version info appended)
+ if tt.wantSuffix != "" {
+ require.Equal(t, true, len(trimmed) >= len(tt.wantSuffix), "Result too short for suffix check")
+ // The suffix should appear immediately after the prefix
+ afterPrefix := trimmed[len(tt.wantPrefix):]
+ require.Equal(t, true, len(afterPrefix) >= len(tt.wantSuffix), "Not enough room for suffix after prefix")
+ require.Equal(t, tt.wantSuffix, afterPrefix[:len(tt.wantSuffix)], "Suffix mismatch")
+ }
+ })
+ }
+}
+
+func TestGraffitiInfo_UpdateFromEngine(t *testing.T) {
+ g := NewGraffitiInfo()
+
+ // Initially no EL info - should still have CL info (PR + commit)
+ result := g.GenerateGraffiti([]byte{})
+ resultStr := trimNullBytes(string(result[:]))
+ require.Equal(t, "PR", resultStr[:2], "Expected CL info before update")
+
+ // Update with EL info
+ g.UpdateFromEngine("GE", "1234abcd")
+
+ result = g.GenerateGraffiti([]byte{})
+ resultStr = trimNullBytes(string(result[:]))
+ require.Equal(t, "GE1234PR", resultStr[:8], "Expected EL+CL info after update")
+}
+
+func TestTruncateCommit(t *testing.T) {
+ tests := []struct {
+ commit string
+ n int
+ want string
+ }{
+ {"abcd1234", 4, "abcd"},
+ {"ab", 4, "ab"},
+ {"", 4, ""},
+ {"abcdef", 2, "ab"},
+ }
+
+ for _, tt := range tests {
+ got := truncateCommit(tt.commit, tt.n)
+ require.Equal(t, tt.want, got)
+ }
+}
+
+func trimNullBytes(s string) string {
+ for len(s) > 0 && s[len(s)-1] == 0 {
+ s = s[:len(s)-1]
+ }
+ return s
+}
diff --git a/beacon-chain/execution/options.go b/beacon-chain/execution/options.go
index 7d178671ce..e7918b1c1b 100644
--- a/beacon-chain/execution/options.go
+++ b/beacon-chain/execution/options.go
@@ -124,3 +124,11 @@ func WithVerifierWaiter(v *verification.InitializerWaiter) Option {
return nil
}
}
+
+// WithGraffitiInfo sets the GraffitiInfo for client version tracking.
+func WithGraffitiInfo(g *GraffitiInfo) Option {
+ return func(s *Service) error {
+ s.graffitiInfo = g
+ return nil
+ }
+}
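
Wiring-wise, the new option is meant to be passed when the execution service is constructed, so the same GraffitiInfo pointer can later be shared with the validator RPC server. A hedged sketch of that hookup; execution.NewService is assumed to follow the package's usual functional-options signature, and every other option is omitted.

package main

import (
	"context"
	"log"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
)

func main() {
	ctx := context.Background()
	graffitiInfo := execution.NewGraffitiInfo()

	// Assumption: NewService accepts functional options; only the new graffiti
	// option is shown, the rest of the node wiring (endpoints, caches, waiters) is omitted.
	svc, err := execution.NewService(ctx, execution.WithGraffitiInfo(graffitiInfo))
	if err != nil {
		log.Fatal(err)
	}

	// The same GraffitiInfo pointer is later read by the validator RPC server
	// when it builds block graffiti.
	_ = svc.GraffitiInfo()
}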
diff --git a/beacon-chain/execution/service.go b/beacon-chain/execution/service.go
index f9d35fd7a5..d9b095ade4 100644
--- a/beacon-chain/execution/service.go
+++ b/beacon-chain/execution/service.go
@@ -162,6 +162,7 @@ type Service struct {
verifierWaiter *verification.InitializerWaiter
blobVerifier verification.NewBlobVerifier
capabilityCache *capabilityCache
+ graffitiInfo *GraffitiInfo
}
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -318,6 +319,28 @@ func (s *Service) updateConnectedETH1(state bool) {
s.updateBeaconNodeStats()
}
+// GraffitiInfo returns the GraffitiInfo struct for graffiti generation.
+func (s *Service) GraffitiInfo() *GraffitiInfo {
+ return s.graffitiInfo
+}
+
+// updateGraffitiInfo fetches EL client version and updates the graffiti info.
+func (s *Service) updateGraffitiInfo() {
+ if s.graffitiInfo == nil {
+ return
+ }
+ ctx, cancel := context.WithTimeout(s.ctx, time.Second)
+ defer cancel()
+ versions, err := s.GetClientVersion(ctx)
+ if err != nil {
+ log.WithError(err).Debug("Could not get execution client version for graffiti")
+ return
+ }
+ if len(versions) >= 1 {
+ s.graffitiInfo.UpdateFromEngine(versions[0].Code, versions[0].Commit)
+ }
+}
+
// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followedBlockHeight(ctx context.Context) (uint64, error) {
@@ -598,6 +621,12 @@ func (s *Service) run(done <-chan struct{}) {
chainstartTicker := time.NewTicker(logPeriod)
defer chainstartTicker.Stop()
+ // Update graffiti info every 96 seconds, i.e. four times per epoch with 12s slots and 32 slots per epoch
+ graffitiTicker := time.NewTicker(96 * time.Second)
+ defer graffitiTicker.Stop()
+ // Initial update
+ s.updateGraffitiInfo()
+
for {
select {
case <-done:
@@ -622,6 +651,8 @@ func (s *Service) run(done <-chan struct{}) {
continue
}
s.logTillChainStart(context.Background())
+ case <-graffitiTicker.C:
+ s.updateGraffitiInfo()
}
}
}
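
The hard-coded 96-second ticker is simply one quarter of a mainnet epoch. A tiny sketch of that arithmetic, should the interval ever be derived from config instead (this is not what the diff does):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mainnet parameters; the diff hard-codes the resulting 96s directly.
	const secondsPerSlot, slotsPerEpoch = 12, 32
	interval := time.Duration(secondsPerSlot*slotsPerEpoch/4) * time.Second
	fmt.Println(interval) // 1m36s, i.e. four graffiti refreshes per epoch
}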
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/BUILD.bazel b/beacon-chain/forkchoice/doubly-linked-tree/BUILD.bazel
index 09dd66018c..daa8e4a55f 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/BUILD.bazel
+++ b/beacon-chain/forkchoice/doubly-linked-tree/BUILD.bazel
@@ -6,6 +6,7 @@ go_library(
"doc.go",
"errors.go",
"forkchoice.go",
+ "gloas.go",
"log.go",
"metrics.go",
"node.go",
@@ -32,6 +33,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
+ "//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go b/beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
index 5aa33813f7..3e1b7a0287 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
@@ -31,7 +31,8 @@ func New() *ForkChoice {
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
- nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
+ emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
+ fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}
@@ -43,7 +44,7 @@ func New() *ForkChoice {
// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
- return len(f.store.nodeByRoot)
+ return len(f.store.emptyNodeByRoot)
}
// Head returns the head root from fork choice store.
@@ -64,14 +65,14 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
- if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
+ if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
- if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
+ if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)
@@ -118,14 +119,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
- node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
+ pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
- jc, fc = f.store.pullTips(state, node, jc, fc)
+ jc, fc = f.store.pullTips(state, pn.node, jc, fc)
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
- _, remErr := f.store.removeNode(ctx, node)
+ _, remErr := f.store.removeNode(ctx, pn)
if remErr != nil {
log.WithError(remErr).Error("Could not remove node")
}
@@ -148,49 +149,63 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
return nil
}
- f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
- Root: bytesutil.ToBytes32(fc.Root)}
+ f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{
+ Epoch: fc.Epoch,
+ Root: bytesutil.ToBytes32(fc.Root),
+ }
return f.store.prune(ctx)
}
// HasNode returns true if the node exists in fork choice store,
// false else wise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
- _, ok := f.store.nodeByRoot[root]
+ _, ok := f.store.emptyNodeByRoot[root]
return ok
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
+ // It is fine to pick the empty node here since we only check whether the beacon block is canonical.
+ pn, ok := f.store.emptyNodeByRoot[root]
+ if !ok || pn == nil {
return false
}
- if node.bestDescendant == nil {
+ if pn.node.bestDescendant == nil {
+ // The node doesn't have any children
if f.store.headNode.bestDescendant == nil {
- return node == f.store.headNode
+ // headNode is itself head.
+ return pn.node == f.store.headNode
}
- return node == f.store.headNode.bestDescendant
+ // headNode is not actualized and there are some descendants
+ return pn.node == f.store.headNode.bestDescendant
}
+ // The node has children
if f.store.headNode.bestDescendant == nil {
- return node.bestDescendant == f.store.headNode
+ return pn.node.bestDescendant == f.store.headNode
}
- return node.bestDescendant == f.store.headNode.bestDescendant
+ return pn.node.bestDescendant == f.store.headNode.bestDescendant
}
// IsOptimistic returns true if the given root has been optimistically synced.
+// TODO: Gloas. The current implementation uses the status of the full node for
+// the given root. In Gloas this can be incorrect; callers should specify which
+// payload content they mean, so we should expose full/empty variants of this call.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
if f.store.allTipsAreInvalid {
return true, nil
}
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
+ en, ok := f.store.emptyNodeByRoot[root]
+ if !ok || en == nil {
return true, ErrNilNode
}
+ fn := f.store.fullNodeByRoot[root]
+ if fn != nil {
+ return fn.optimistic, nil
+ }
- return node.optimistic, nil
+ return en.optimistic, nil
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
@@ -198,17 +213,21 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
defer span.End()
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
+ pn, ok := f.store.emptyNodeByRoot[root]
+ if !ok || pn == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}
- n := node
- for n != nil && n.slot > slot {
+ n := pn.node
+ for n.slot > slot {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
- n = n.parent
+ if n.parent == nil {
+ n = nil
+ break
+ }
+ n = n.parent.node
}
if n == nil {
@@ -221,10 +240,11 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
- node, ok := f.store.nodeByRoot[cp.Root]
- if !ok || node == nil {
+ pn, ok := f.store.emptyNodeByRoot[cp.Root]
+ if !ok || pn == nil {
return false, nil
}
+ node := pn.node
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
@@ -233,10 +253,13 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return false, nil
}
- if len(node.children) == 0 {
+ // If the block is at the start of the epoch, it is viable as a checkpoint
+ if node.slot == epochStart {
return true, nil
}
- if node.slot == epochStart {
+ // If there are no descendants of this beacon block, it is viable as a checkpoint
+ children := f.store.allConsensusChildren(node)
+ if len(children) == 0 {
return true, nil
}
if !features.Get().IgnoreUnviableAttestations {
@@ -246,7 +269,8 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return true, nil
}
}
- for _, child := range node.children {
+ // If some child is after the start of the epoch, the checkpoint is viable.
+ for _, child := range children {
if child.slot > epochStart {
return true, nil
}
@@ -287,7 +311,7 @@ func (f *ForkChoice) updateBalances() error {
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
- nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
+ nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
@@ -296,7 +320,7 @@ func (f *ForkChoice) updateBalances() error {
nextNode.balance += newBalance
}
- currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
+ currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
@@ -337,13 +361,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
-// SetOptimisticToValid sets the node with the given root as a fully validated node
+// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
+ fn, ok := f.store.fullNodeByRoot[root]
+ if !ok || fn == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
}
- return node.setNodeAndParentValidated(ctx)
+ return f.store.setNodeAndParentValidated(ctx, fn)
}
// PreviousJustifiedCheckpoint of fork choice store.
@@ -362,8 +386,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
-func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
- return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
+func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
+ return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
}
// InsertSlashedIndex adds the given slashed validator index to the
@@ -386,7 +410,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
return
}
- node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
+ node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
if !ok || node == nil {
return
}
@@ -421,22 +445,30 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
}
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
+// This is payload aware. Consider the following situation
+// [A,full] <--- [B, full] <---[C,pending]
+//
+// \---------[B, empty] <--[D, pending]
+//
+// Then even though C and D both descend from the beacon block B, their common ancestor is A.
+// Note also that this function **requires** the two roots to be actual contending blocks; otherwise the
+// behavior is undefined.
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
defer span.End()
- n1, ok := f.store.nodeByRoot[r1]
- if !ok || n1 == nil {
+ en1, ok := f.store.emptyNodeByRoot[r1]
+ if !ok || en1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
// Do nothing if the input roots are the same.
if r1 == r2 {
- return r1, n1.slot, nil
+ return r1, en1.node.slot, nil
}
- n2, ok := f.store.nodeByRoot[r2]
- if !ok || n2 == nil {
+ en2, ok := f.store.emptyNodeByRoot[r2]
+ if !ok || en2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
@@ -444,23 +476,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
if ctx.Err() != nil {
return [32]byte{}, 0, ctx.Err()
}
- if n1.slot > n2.slot {
- n1 = n1.parent
+ if en1.node.slot > en2.node.slot {
+ en1 = en1.node.parent
// Reaches the end of the tree and unable to find common ancestor.
// This should not happen at runtime as the finalized
// node has to be a common ancestor
- if n1 == nil {
+ if en1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
} else {
- n2 = n2.parent
+ en2 = en2.node.parent
// Reaches the end of the tree and unable to find common ancestor.
- if n2 == nil {
+ if en2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
}
- if n1 == n2 {
- return n1.root, n1.slot, nil
+ if en1 == en2 {
+ return en1.node.root, en1.node.slot, nil
}
}
}
@@ -507,35 +539,17 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
- root := f.FinalizedCheckpoint().Root
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
- // This should not happen
- return [32]byte{}
- }
- return node.payloadHash
+ return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
}
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
- root := f.JustifiedCheckpoint().Root
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
- // This should not happen
- return [32]byte{}
- }
- return node.payloadHash
+ return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
}
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
- root := f.store.unrealizedJustifiedCheckpoint.Root
- node, ok := f.store.nodeByRoot[root]
- if !ok || node == nil {
- // This should not happen
- return [32]byte{}
- }
- return node.payloadHash
+ return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
}
// ForkChoiceDump returns a full dump of forkchoice.
@@ -559,7 +573,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
- nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
+ nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
if err != nil {
return nil, err
}
@@ -588,7 +602,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
- n, ok := f.store.nodeByRoot[root]
+ n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
@@ -616,11 +630,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
- n, ok := f.store.nodeByRoot[root]
+ n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
- return n.slot, nil
+ return n.node.slot, nil
}
// DependentRoot returns the last root of the epoch prior to the requested ecoch in the canonical chain.
@@ -628,7 +642,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
}
-// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
+// DependentRootForEpoch returns the last root of the epoch prior to the requested epoch for the given root.
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
tr, err := f.TargetRootForEpoch(root, epoch)
if err != nil {
@@ -637,18 +651,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
if tr == [32]byte{} {
return [32]byte{}, nil
}
- node, ok := f.store.nodeByRoot[tr]
- if !ok || node == nil {
+ en, ok := f.store.emptyNodeByRoot[tr]
+ if !ok || en == nil {
return [32]byte{}, ErrNilNode
}
- if slots.ToEpoch(node.slot) >= epoch {
- if node.parent != nil {
- node = node.parent
+ if slots.ToEpoch(en.node.slot) >= epoch {
+ if en.node.parent != nil {
+ en = en.node.parent
} else {
return f.store.finalizedDependentRoot, nil
}
}
- return node.root, nil
+ return en.node.root, nil
}
// TargetRootForEpoch returns the root of the target block for a given epoch.
@@ -660,46 +674,48 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
// which case we return the root of the checkpoint of the chain containing the
// passed root, at the given epoch
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
- n, ok := f.store.nodeByRoot[root]
+ n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
- nodeEpoch := slots.ToEpoch(n.slot)
+ node := n.node
+ nodeEpoch := slots.ToEpoch(node.slot)
if epoch > nodeEpoch {
- return n.root, nil
+ return node.root, nil
}
- if n.target == nil {
+ if node.target == nil {
return [32]byte{}, nil
}
- targetRoot := n.target.root
+ targetRoot := node.target.root
if epoch == nodeEpoch {
return targetRoot, nil
}
- targetNode, ok := f.store.nodeByRoot[targetRoot]
+ targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
if !ok || targetNode == nil {
return [32]byte{}, ErrNilNode
}
// If slot 0 was not missed we consider a previous block to go back at least one epoch
- if nodeEpoch == slots.ToEpoch(targetNode.slot) {
- targetNode = targetNode.parent
+ if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
+ targetNode = targetNode.node.parent
if targetNode == nil {
return [32]byte{}, ErrNilNode
}
}
- return f.TargetRootForEpoch(targetNode.root, epoch)
+ return f.TargetRootForEpoch(targetNode.node.root, epoch)
}
// ParentRoot returns the block root of the parent node if it is in forkchoice.
// The exception is for the finalized checkpoint root which we return the zero
// hash.
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
- n, ok := f.store.nodeByRoot[root]
+ n, ok := f.store.emptyNodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
// Return the zero hash for the tree root
- if n.parent == nil {
+ parent := n.node.parent
+ if parent == nil {
return [32]byte{}, nil
}
- return n.parent.root, nil
+ return parent.node.root, nil
}
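
The empty/full split above routes every lookup through two maps keyed by the same beacon block root. A minimal sketch of the latestHashForRoot-style fallback, using simplified stand-in types rather than the real Node/PayloadNode definitions (which also carry weights, children and optimistic flags):

package main

import "fmt"

// Simplified stand-ins for the forkchoice types.
type node struct {
	root      [32]byte
	blockHash [32]byte
	parent    *payloadNode
}

type payloadNode struct {
	node *node
	full bool
}

type store struct {
	emptyNodeByRoot map[[32]byte]*payloadNode
	fullNodeByRoot  map[[32]byte]*payloadNode
}

func (s *store) latestHashForRoot(root [32]byte) [32]byte {
	// Prefer the full node's own payload hash.
	if fn := s.fullNodeByRoot[root]; fn != nil {
		return fn.node.blockHash
	}
	// Otherwise the block is "empty": its latest payload is the nearest full ancestor's.
	en := s.emptyNodeByRoot[root]
	if en == nil {
		return [32]byte{}
	}
	for p := en.node.parent; p != nil; p = p.node.parent {
		if p.full {
			return p.node.blockHash
		}
	}
	return [32]byte{}
}

func main() {
	a := &node{root: [32]byte{'a'}, blockHash: [32]byte{'A'}}
	aFull := &payloadNode{node: a, full: true}
	// Block b was only inserted with its consensus (empty) part and builds on a's full node.
	b := &node{root: [32]byte{'b'}, blockHash: [32]byte{'B'}, parent: aFull}
	s := &store{
		emptyNodeByRoot: map[[32]byte]*payloadNode{a.root: {node: a}, b.root: {node: b}},
		fullNodeByRoot:  map[[32]byte]*payloadNode{a.root: aFull},
	}
	fmt.Printf("%c\n", s.latestHashForRoot(b.root)[0]) // prints A: b's latest payload hash is still a's
}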
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go b/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go
index 107ead8ff8..5c047dfa6a 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go
@@ -3,7 +3,6 @@ package doublylinkedtree
import (
"context"
"encoding/binary"
- "errors"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice"
@@ -104,9 +103,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
s := f.store
- assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
- assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
- assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
+ assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
+ assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
+ assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
@@ -122,9 +121,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
- s.nodeByRoot[indexToHash(1)].balance = 100
- s.nodeByRoot[indexToHash(2)].balance = 100
- s.nodeByRoot[indexToHash(3)].balance = 100
+ s.emptyNodeByRoot[indexToHash(1)].balance = 100
+ s.emptyNodeByRoot[indexToHash(2)].balance = 100
+ s.emptyNodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{100, 100, 100}
f.votes = []Vote{
@@ -135,9 +134,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
- assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
- assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
- assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
+ assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
+ assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
+ assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
@@ -153,9 +152,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
- s.nodeByRoot[indexToHash(1)].balance = 100
- s.nodeByRoot[indexToHash(2)].balance = 100
- s.nodeByRoot[indexToHash(3)].balance = 100
+ s.emptyNodeByRoot[indexToHash(1)].balance = 100
+ s.emptyNodeByRoot[indexToHash(2)].balance = 100
+ s.emptyNodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
@@ -166,9 +165,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
- assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
- assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
- assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
+ assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
+ assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
+ assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
@@ -224,12 +223,12 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
- f.store.nodeByRoot[[32]byte{'3'}].balance = 10
- require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
- require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
- require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
+ f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
+ require.NoError(t, f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode))
+ require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].node.weight)
+ require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].node.weight)
- require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
+ require.NoError(t, f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
r1 := [32]byte{'1'}
@@ -260,7 +259,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
- f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
+ f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)].node
f.store.treeRootNode.parent = nil
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
@@ -342,21 +341,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
- require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
+ require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
- require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
- require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
+ require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
+ require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Process b's slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
- require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
+ require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
- require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
- require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
+ require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
+ require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
@@ -514,58 +513,6 @@ func TestStore_CommonAncestor(t *testing.T) {
})
}
- // a -- b -- c -- d
- f = setup(0, 0)
- st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, st, roblock))
- st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, st, roblock))
- st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, st, roblock))
- st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, st, roblock))
- tests = []struct {
- name string
- r1 [32]byte
- r2 [32]byte
- wantRoot [32]byte
- wantSlot primitives.Slot
- }{
- {
- name: "Common ancestor between a and b is a",
- r1: [32]byte{'a'},
- r2: [32]byte{'b'},
- wantRoot: [32]byte{'a'},
- wantSlot: 0,
- },
- {
- name: "Common ancestor between b and d is b",
- r1: [32]byte{'d'},
- r2: [32]byte{'b'},
- wantRoot: [32]byte{'b'},
- wantSlot: 1,
- },
- {
- name: "Common ancestor between d and a is a",
- r1: [32]byte{'d'},
- r2: [32]byte{'a'},
- wantRoot: [32]byte{'a'},
- wantSlot: 0,
- },
- }
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
- require.NoError(t, err)
- require.Equal(t, tc.wantRoot, gotRoot)
- require.Equal(t, tc.wantSlot, gotSlot)
- })
- }
-
// Equal inputs should return the same root.
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
@@ -588,10 +535,9 @@ func TestStore_CommonAncestor(t *testing.T) {
unrealizedJustifiedEpoch: 1,
finalizedEpoch: 1,
unrealizedFinalizedEpoch: 1,
- optimistic: true,
}
- f.store.nodeByRoot[[32]byte{'y'}] = n
+ f.store.emptyNodeByRoot[[32]byte{'y'}] = &PayloadNode{node: n, optimistic: true}
// broken link
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
@@ -610,7 +556,8 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
- blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
+ blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
+ Block: roblock,
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -625,7 +572,8 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
- blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
+ blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
+ Block: roblock,
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -742,7 +690,7 @@ func TestWeight(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
- n, ok := f.store.nodeByRoot[root]
+ n, ok := f.store.emptyNodeByRoot[root]
require.Equal(t, true, ok)
n.weight = 10
w, err := f.Weight(root)
@@ -914,16 +862,3 @@ func TestForkchoiceParentRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, zeroHash, root)
}
-
-func TestForkChoice_CleanupInserting(t *testing.T) {
- f := setup(0, 0)
- ctx := t.Context()
- st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
- f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
- return f.justifiedBalances, errors.New("mock err")
- })
-
- require.NoError(t, err)
- require.NotNil(t, f.InsertNode(ctx, st, roblock))
- require.Equal(t, false, f.HasNode(roblock.Root()))
-}
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/gloas.go b/beacon-chain/forkchoice/doubly-linked-tree/gloas.go
new file mode 100644
index 0000000000..7d6e8b9047
--- /dev/null
+++ b/beacon-chain/forkchoice/doubly-linked-tree/gloas.go
@@ -0,0 +1,289 @@
+package doublylinkedtree
+
+import (
+ "bytes"
+ "context"
+ "slices"
+
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/pkg/errors"
+)
+
+func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
+ sb, err := block.Body().SignedExecutionPayloadBid()
+ if err != nil {
+ return err
+ }
+ wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
+ if err != nil {
+ return errors.Wrap(err, "failed to wrap signed bid")
+ }
+ bid, err := wb.Bid()
+ if err != nil {
+ return errors.Wrap(err, "failed to get bid from wrapped bid")
+ }
+ *blockHash = bid.BlockHash()
+ parentRoot := block.ParentRoot()
+ *parent = s.emptyNodeByRoot[parentRoot]
+ if *parent == nil {
+ // This is the tree root node.
+ return nil
+ }
+ if bid.ParentBlockHash() == (*parent).node.blockHash {
+ // block builds on full
+ *parent = s.fullNodeByRoot[(*parent).node.root]
+ }
+ return nil
+}
+
+// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
+// using the current balance stored in each node.
+func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
+ // Recursively calling the children to sum their weights.
+ en := s.emptyNodeByRoot[n.root]
+ if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
+ return err
+ }
+ childrenWeight := en.weight
+ fn := s.fullNodeByRoot[n.root]
+ if fn != nil {
+ if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
+ return err
+ }
+ childrenWeight += fn.weight
+ }
+ if n.root == params.BeaconConfig().ZeroHash {
+ return nil
+ }
+ n.weight = n.balance + childrenWeight
+ return nil
+}
+
+// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
+// using the current balance stored in each node.
+func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
+ // Recursively calling the children to sum their weights.
+ childrenWeight := uint64(0)
+ for _, child := range n.children {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
+ return err
+ }
+ childrenWeight += child.weight
+ }
+ n.weight = n.balance + childrenWeight
+ return nil
+}
+
+// allConsensusChildren returns the list of all consensus blocks that build on the given node.
+func (s *Store) allConsensusChildren(n *Node) []*Node {
+ en := s.emptyNodeByRoot[n.root]
+ fn, ok := s.fullNodeByRoot[n.root]
+ if ok {
+ return append(slices.Clone(en.children), fn.children...)
+ }
+ return en.children
+}
+
+// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
+func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ if !pn.optimistic {
+ return nil
+ }
+ pn.optimistic = false
+ if pn.full {
+ // set the empty node as valid as well
+ en := s.emptyNodeByRoot[pn.node.root]
+ en.optimistic = false
+ }
+ if pn.node.parent == nil {
+ return nil
+ }
+ return s.setNodeAndParentValidated(ctx, pn.node.parent)
+}
+
+// fullParent returns the latest full node that this block builds on.
+func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
+ parent := pn.node.parent
+ for ; parent != nil && !parent.full; parent = parent.node.parent {
+ }
+ return parent
+}
+
+// parentHash returns the payload hash of the latest full node that this block builds on.
+func (s *Store) parentHash(pn *PayloadNode) [32]byte {
+ fullParent := s.fullParent(pn)
+ if fullParent == nil {
+ return [32]byte{}
+ }
+ return fullParent.node.blockHash
+}
+
+// latestHashForRoot returns the latest payload hash for the given block root.
+func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
+ // try to get the full node first
+ fn := s.fullNodeByRoot[root]
+ if fn != nil {
+ return fn.node.blockHash
+ }
+ en := s.emptyNodeByRoot[root]
+ if en == nil {
+ // This should not happen
+ return [32]byte{}
+ }
+ return s.parentHash(en)
+}
+
+// updateBestDescendantPayloadNode updates the best descendant of this node and its
+// children.
+func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ var bestChild *Node
+ bestWeight := uint64(0)
+ for _, child := range n.children {
+ if child == nil {
+ return errors.Wrap(ErrNilNode, "could not update best descendant")
+ }
+ if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
+ return err
+ }
+ childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
+ if childLeadsToViableHead && bestChild == nil {
+ // The child leads to a viable head, but the current
+ // parent's best child doesn't.
+ bestWeight = child.weight
+ bestChild = child
+ } else if childLeadsToViableHead {
+ // If both are viable, compare their weights.
+ if child.weight == bestWeight {
+ // Tie-breaker of equal weights by root.
+ if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
+ bestChild = child
+ }
+ } else if child.weight > bestWeight {
+ bestChild = child
+ bestWeight = child.weight
+ }
+ }
+ }
+ if bestChild == nil {
+ n.bestDescendant = nil
+ } else {
+ if bestChild.bestDescendant == nil {
+ n.bestDescendant = bestChild
+ } else {
+ n.bestDescendant = bestChild.bestDescendant
+ }
+ }
+ return nil
+}
+
+// updateBestDescendantConsensusNode updates the best descendant of this node and its
+// children.
+func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if len(s.allConsensusChildren(n)) == 0 {
+ n.bestDescendant = nil
+ return nil
+ }
+
+ en := s.emptyNodeByRoot[n.root]
+ if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
+ return err
+ }
+ fn := s.fullNodeByRoot[n.root]
+ if fn == nil {
+ n.bestDescendant = en.bestDescendant
+ return nil
+ }
+ // TODO GLOAS: pick between full or empty
+ if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
+ return err
+ }
+ n.bestDescendant = fn.bestDescendant
+ return nil
+}
+
+// choosePayloadContent chooses between the empty and full payload nodes for the passed consensus node. TODO Gloas: use PTC to choose.
+func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
+ if n == nil {
+ return nil
+ }
+ fn := s.fullNodeByRoot[n.root]
+ if fn != nil {
+ return fn
+ }
+ return s.emptyNodeByRoot[n.root]
+}
+
+// nodeTreeDump appends to the given list all the nodes descending from this one
+func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ var parentRoot [32]byte
+ if n.parent != nil {
+ parentRoot = n.parent.node.root
+ }
+ target := [32]byte{}
+ if n.target != nil {
+ target = n.target.root
+ }
+ optimistic := false
+ if n.parent != nil {
+ optimistic = n.parent.optimistic
+ }
+ en := s.emptyNodeByRoot[n.root]
+ timestamp := en.timestamp
+ fn := s.fullNodeByRoot[n.root]
+ if fn != nil {
+ optimistic = fn.optimistic
+ timestamp = fn.timestamp
+ }
+ thisNode := &forkchoice2.Node{
+ Slot: n.slot,
+ BlockRoot: n.root[:],
+ ParentRoot: parentRoot[:],
+ JustifiedEpoch: n.justifiedEpoch,
+ FinalizedEpoch: n.finalizedEpoch,
+ UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
+ UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
+ Balance: n.balance,
+ Weight: n.weight,
+ ExecutionOptimistic: optimistic,
+ ExecutionBlockHash: n.blockHash[:],
+ Timestamp: timestamp,
+ Target: target[:],
+ }
+ if optimistic {
+ thisNode.Validity = forkchoice2.Optimistic
+ } else {
+ thisNode.Validity = forkchoice2.Valid
+ }
+
+ nodes = append(nodes, thisNode)
+ var err error
+ children := s.allConsensusChildren(n)
+ for _, child := range children {
+ nodes, err = s.nodeTreeDump(ctx, child, nodes)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
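A minimal standalone sketch of the weight recursion above, using simplified stand-in types (consensusNode, payloadNode, a store with two maps) rather than the actual Prysm Node/PayloadNode structs: each consensus block has an always-present "empty" payload node and an optional "full" one, and a consensus node's weight is its own balance plus the weights of both payload nodes, which in turn sum their children's weights. This is only an illustration of the recursion in applyWeightChangesConsensusNode/applyWeightChangesPayloadNode, not the real implementation.

package main

import "fmt"

type consensusNode struct {
	root    [32]byte
	balance uint64
	weight  uint64
}

type payloadNode struct {
	node     *consensusNode
	balance  uint64
	weight   uint64
	children []*consensusNode // consensus blocks building on this payload state
}

type store struct {
	emptyByRoot map[[32]byte]*payloadNode
	fullByRoot  map[[32]byte]*payloadNode
}

// applyWeights mirrors applyWeightChangesConsensusNode: sum the empty and
// (optional) full payload node weights, then add the node's own balance.
func (s *store) applyWeights(n *consensusNode) {
	en := s.emptyByRoot[n.root]
	s.applyPayloadWeights(en)
	total := en.weight
	if fn, ok := s.fullByRoot[n.root]; ok {
		s.applyPayloadWeights(fn)
		total += fn.weight
	}
	n.weight = n.balance + total
}

// applyPayloadWeights mirrors applyWeightChangesPayloadNode: recurse into the
// consensus children and add this payload node's own balance.
func (s *store) applyPayloadWeights(pn *payloadNode) {
	sum := uint64(0)
	for _, child := range pn.children {
		s.applyWeights(child)
		sum += child.weight
	}
	pn.weight = pn.balance + sum
}

func main() {
	a := &consensusNode{root: [32]byte{'a'}, balance: 10}
	b := &consensusNode{root: [32]byte{'b'}, balance: 5}
	s := &store{
		emptyByRoot: map[[32]byte]*payloadNode{
			a.root: {node: a, children: []*consensusNode{b}},
			b.root: {node: b},
		},
		fullByRoot: map[[32]byte]*payloadNode{
			b.root: {node: b, balance: 3},
		},
	}
	s.applyWeights(a)
	fmt.Println(a.weight, b.weight) // prints 18 8: b = 5 + 0 + 3, a = 10 + 8
}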
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/node.go b/beacon-chain/forkchoice/doubly-linked-tree/node.go
index 3b36157f18..ee174d51ee 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/node.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/node.go
@@ -1,95 +1,17 @@
package doublylinkedtree
import (
- "bytes"
- "context"
"time"
"github.com/OffchainLabs/prysm/v7/config/params"
- forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
- "github.com/pkg/errors"
)
// ProcessAttestationsThreshold is the amount of time after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10 * time.Second
-// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
-// using the current balance stored in each node.
-func (n *Node) applyWeightChanges(ctx context.Context) error {
- // Recursively calling the children to sum their weights.
- childrenWeight := uint64(0)
- for _, child := range n.children {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- if err := child.applyWeightChanges(ctx); err != nil {
- return err
- }
- childrenWeight += child.weight
- }
- if n.root == params.BeaconConfig().ZeroHash {
- return nil
- }
- n.weight = n.balance + childrenWeight
- return nil
-}
-
-// updateBestDescendant updates the best descendant of this node and its
-// children.
-func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- if len(n.children) == 0 {
- n.bestDescendant = nil
- return nil
- }
-
- var bestChild *Node
- bestWeight := uint64(0)
- hasViableDescendant := false
- for _, child := range n.children {
- if child == nil {
- return errors.Wrap(ErrNilNode, "could not update best descendant")
- }
- if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
- return err
- }
- childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
- if childLeadsToViableHead && !hasViableDescendant {
- // The child leads to a viable head, but the current
- // parent's best child doesn't.
- bestWeight = child.weight
- bestChild = child
- hasViableDescendant = true
- } else if childLeadsToViableHead {
- // If both are viable, compare their weights.
- if child.weight == bestWeight {
- // Tie-breaker of equal weights by root.
- if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
- bestChild = child
- }
- } else if child.weight > bestWeight {
- bestChild = child
- bestWeight = child.weight
- }
- }
- }
- if hasViableDescendant {
- if bestChild.bestDescendant == nil {
- n.bestDescendant = bestChild
- } else {
- n.bestDescendant = bestChild.bestDescendant
- }
- } else {
- n.bestDescendant = nil
- }
- return nil
-}
-
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
@@ -110,30 +32,13 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
}
-// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
-func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
- if ctx.Err() != nil {
- return ctx.Err()
- }
-
- if !n.optimistic {
- return nil
- }
- n.optimistic = false
-
- if n.parent == nil {
- return nil
- }
- return n.parent.setNodeAndParentValidated(ctx)
-}
-
// arrivedEarly returns whether this node was inserted before the first
// threshold to orphan a block.
// Note that genesisTime has seconds granularity, therefore we use a strict
// inequality < here. For example a block that arrives 3.9999 seconds into the
// slot will have secs = 3 below.
-func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
- sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
+func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
+ sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
return sss < votingWindow, err
}
@@ -143,52 +48,7 @@ func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
// Note that genesisTime has seconds granularity, therefore we use an
// inequality >= here. For example a block that arrives 10.00001 seconds into the
// slot will have secs = 10 below.
-func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
- secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
+func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
+ secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
return secs >= ProcessAttestationsThreshold, err
}
-
-// nodeTreeDump appends to the given list all the nodes descending from this one
-func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- var parentRoot [32]byte
- if n.parent != nil {
- parentRoot = n.parent.root
- }
- target := [32]byte{}
- if n.target != nil {
- target = n.target.root
- }
- thisNode := &forkchoice2.Node{
- Slot: n.slot,
- BlockRoot: n.root[:],
- ParentRoot: parentRoot[:],
- JustifiedEpoch: n.justifiedEpoch,
- FinalizedEpoch: n.finalizedEpoch,
- UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
- UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
- Balance: n.balance,
- Weight: n.weight,
- ExecutionOptimistic: n.optimistic,
- ExecutionBlockHash: n.payloadHash[:],
- Timestamp: n.timestamp,
- Target: target[:],
- }
- if n.optimistic {
- thisNode.Validity = forkchoice2.Optimistic
- } else {
- thisNode.Validity = forkchoice2.Valid
- }
-
- nodes = append(nodes, thisNode)
- var err error
- for _, child := range n.children {
- nodes, err = child.nodeTreeDump(ctx, nodes)
- if err != nil {
- return nil, err
- }
- }
- return nodes, nil
-}
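Since arrivedEarly and arrivedAfterOrphanCheck now hang off the payload node rather than the consensus node, the timing checks compare the payload arrival time against the slot start. A minimal sketch of that comparison, assuming illustrative 12-second slots, a 4-second attestation deadline and a 10-second orphan threshold in place of the configured params.BeaconConfig() durations and the slots package helpers:

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot  = 12 * time.Second // illustrative, not the configured slot duration
	attestationDue  = 4 * time.Second  // stand-in for the attestation deadline
	orphanThreshold = 10 * time.Second // stand-in for ProcessAttestationsThreshold
)

// sinceSlotStart returns how far into the slot the payload arrived,
// truncating to whole seconds like the code above does.
func sinceSlotStart(slot uint64, genesis, arrival time.Time) time.Duration {
	slotStart := genesis.Add(time.Duration(slot) * secondsPerSlot)
	return arrival.Truncate(time.Second).Sub(slotStart)
}

func main() {
	genesis := time.Unix(1_600_000_000, 0)
	arrival := genesis.Add(10*secondsPerSlot + 3*time.Second) // 3s into slot 10
	sss := sinceSlotStart(10, genesis, arrival)
	fmt.Println(sss < attestationDue)   // arrivedEarly: true
	fmt.Println(sss >= orphanThreshold) // arrivedAfterOrphanCheck: false
}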
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/node_test.go b/beacon-chain/forkchoice/doubly-linked-tree/node_test.go
index fe21705f06..51f46a483b 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/node_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/node_test.go
@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
- s.nodeByRoot[indexToHash(1)].balance = 100
- s.nodeByRoot[indexToHash(2)].balance = 100
- s.nodeByRoot[indexToHash(3)].balance = 100
+ s.emptyNodeByRoot[indexToHash(1)].balance = 100
+ s.emptyNodeByRoot[indexToHash(2)].balance = 100
+ s.emptyNodeByRoot[indexToHash(3)].balance = 100
- assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
+ assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
- assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
- assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
- assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
+ assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
+ assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
+ assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
- s.nodeByRoot[indexToHash(1)].weight = 400
- s.nodeByRoot[indexToHash(2)].weight = 400
- s.nodeByRoot[indexToHash(3)].weight = 400
+ s.emptyNodeByRoot[indexToHash(1)].weight = 400
+ s.emptyNodeByRoot[indexToHash(2)].weight = 400
+ s.emptyNodeByRoot[indexToHash(3)].weight = 400
- s.nodeByRoot[indexToHash(1)].balance = 100
- s.nodeByRoot[indexToHash(2)].balance = 100
- s.nodeByRoot[indexToHash(3)].balance = 100
+ s.emptyNodeByRoot[indexToHash(1)].balance = 100
+ s.emptyNodeByRoot[indexToHash(2)].balance = 100
+ s.emptyNodeByRoot[indexToHash(3)].balance = 100
- assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
+ assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
- assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
- assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
- assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
+ assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
+ assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
+ assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
@@ -78,7 +78,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
// Verify parent's best child and best descendant are `none`.
s := f.store
- assert.Equal(t, 1, len(s.treeRootNode.children))
+ assert.Equal(t, 1, len(s.allConsensusChildren(s.treeRootNode)))
nilBestDescendant := s.treeRootNode.bestDescendant == nil
assert.Equal(t, true, nilBestDescendant)
}
@@ -92,8 +92,9 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
- assert.Equal(t, 1, len(s.treeRootNode.children))
- assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
+ children := s.allConsensusChildren(s.treeRootNode)
+ assert.Equal(t, 1, len(children))
+ assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
@@ -108,32 +109,34 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
- s.nodeByRoot[indexToHash(1)].weight = 100
- s.nodeByRoot[indexToHash(2)].weight = 200
- assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
+ s.emptyNodeByRoot[indexToHash(1)].weight = 100
+ s.emptyNodeByRoot[indexToHash(2)].weight = 200
+ assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
- assert.Equal(t, 2, len(s.treeRootNode.children))
- assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
+ children := s.allConsensusChildren(s.treeRootNode)
+ assert.Equal(t, 2, len(children))
+ assert.Equal(t, children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
// Input child is the best descendant
- state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
+ state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
- state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
+ state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, indexToHash(102), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
- s.nodeByRoot[indexToHash(1)].weight = 200
- s.nodeByRoot[indexToHash(2)].weight = 100
- assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
+ s.emptyNodeByRoot[indexToHash(1)].node.weight = 200
+ s.emptyNodeByRoot[indexToHash(2)].node.weight = 100
+ assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
- assert.Equal(t, 2, len(s.treeRootNode.children))
- assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
+ children := s.allConsensusChildren(s.treeRootNode)
+ assert.Equal(t, 2, len(children))
+ assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
}
func TestNode_ViableForHead(t *testing.T) {
@@ -176,44 +179,44 @@ func TestNode_LeadsToViableHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
- require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
- require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
- require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
+ require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].node.leadsToViableHead(4, 5))
+ require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].node.leadsToViableHead(4, 5))
+ require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].node.leadsToViableHead(4, 5))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
- storeNodes := make([]*Node, 6)
- storeNodes[0] = f.store.treeRootNode
+ storeNodes := make([]*PayloadNode, 6)
+ storeNodes[0] = f.store.fullNodeByRoot[params.BeaconConfig().ZeroHash]
// insert blocks in the fork pattern (optimistic status in parenthesis)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
// \
// -- 5 (true)
//
- state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
+ state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
- storeNodes[1] = f.store.nodeByRoot[blk.Root()]
- require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
- state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blk))
- storeNodes[2] = f.store.nodeByRoot[blk.Root()]
+ storeNodes[1] = f.store.fullNodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
- state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
+ state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), indexToHash(102), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
- storeNodes[3] = f.store.nodeByRoot[blk.Root()]
- state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
+ storeNodes[2] = f.store.fullNodeByRoot[blk.Root()]
+ require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(2)))
+ state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), indexToHash(103), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
- storeNodes[4] = f.store.nodeByRoot[blk.Root()]
- state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
+ storeNodes[3] = f.store.fullNodeByRoot[blk.Root()]
+ state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), indexToHash(104), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
- storeNodes[5] = f.store.nodeByRoot[blk.Root()]
+ storeNodes[4] = f.store.fullNodeByRoot[blk.Root()]
+ state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), indexToHash(105), 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blk))
+ storeNodes[5] = f.store.fullNodeByRoot[blk.Root()]
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -223,7 +226,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, opt)
- require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
+ require.NoError(t, f.store.setNodeAndParentValidated(ctx, f.store.fullNodeByRoot[indexToHash(4)]))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))
@@ -240,20 +243,20 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.Equal(t, false, opt)
respNodes := make([]*forkchoice.Node, 0)
- respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
+ respNodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())
for i, respNode := range respNodes {
- require.Equal(t, storeNodes[i].slot, respNode.Slot)
- require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
- require.Equal(t, storeNodes[i].balance, respNode.Balance)
- require.Equal(t, storeNodes[i].weight, respNode.Weight)
+ require.Equal(t, storeNodes[i].node.slot, respNode.Slot)
+ require.DeepEqual(t, storeNodes[i].node.root[:], respNode.BlockRoot)
+ require.Equal(t, storeNodes[i].node.balance, respNode.Balance)
+ require.Equal(t, storeNodes[i].node.weight, respNode.Weight)
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
- require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
- require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
- require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
- require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
+ require.Equal(t, storeNodes[i].node.justifiedEpoch, respNode.JustifiedEpoch)
+ require.Equal(t, storeNodes[i].node.unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
+ require.Equal(t, storeNodes[i].node.finalizedEpoch, respNode.FinalizedEpoch)
+ require.Equal(t, storeNodes[i].node.unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
}
}
@@ -272,10 +275,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
- early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
+ early, err := f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, true, early)
- late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+ late, err := f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, late)
@@ -289,10 +292,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
- early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+ early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, early)
- late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+ late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, late)
@@ -305,10 +308,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
- early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+ early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, early)
- late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+ late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, true, late)
@@ -320,10 +323,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
- early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+ early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, true, early)
- late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+ late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, false, late)
}
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync.go b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync.go
index 8116130a3e..9b2846bf5c 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync.go
@@ -7,92 +7,141 @@ import (
"github.com/pkg/errors"
)
-func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
+// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
+func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
- node, ok := s.nodeByRoot[root]
- if !ok {
- node, ok = s.nodeByRoot[parentRoot]
- if !ok || node == nil {
- return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
+ n := s.fullNodeByRoot[root]
+ if n == nil {
+ // The offending node with its payload is not in forkchoice. Try with the parent
+ n = s.emptyNodeByRoot[parentRoot]
+ if n == nil {
+ return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
}
- // return early if the parent is LVH
- if node.payloadHash == lastValidHash {
+ if n.node.blockHash == lastValidHash {
+ // The parent node must have been full with a valid payload
return invalidRoots, nil
}
- } else {
- if node == nil {
- return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
+ if n.node.blockHash == parentHash {
+ // The parent was full and invalid
+ n = s.fullNodeByRoot[parentRoot]
+ if n == nil {
+ return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
+ }
+ } else {
+ // The parent is empty and we don't yet know if it's valid or not
+ for n = n.node.parent; n != nil; n = n.node.parent {
+ if ctx.Err() != nil {
+ return invalidRoots, ctx.Err()
+ }
+ if n.node.blockHash == lastValidHash {
+ // The node built on empty and the whole chain was valid
+ return invalidRoots, nil
+ }
+ if n.node.blockHash == parentHash {
+ // The parent was full and invalid
+ break
+ }
+ }
+ if n == nil {
+ return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
+ }
}
- if node.parent.root != parentRoot {
- return invalidRoots, errInvalidParentRoot
+ } else {
+ // check consistency with the parent information
+ if n.node.parent == nil {
+ return nil, ErrNilNode
+ }
+ if n.node.parent.node.root != parentRoot {
+ return nil, errInvalidParentRoot
}
}
- firstInvalid := node
- for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
+ // n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
+ startNode := n
+ fp := s.fullParent(n)
+ for ; fp != nil && fp.node.blockHash != lastValidHash; fp = s.fullParent(fp) {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
+ n = fp
}
// Deal with the case that the last valid payload is in a different fork
// This means we are dealing with an EE that does not follow the spec
- if firstInvalid.parent == nil {
+ if fp == nil {
// return early if the invalid node was not imported
- if node.root == parentRoot {
+ if startNode.node.root != root {
return invalidRoots, nil
}
- firstInvalid = node
+ // Remove just the imported invalid root
+ n = startNode
}
- return s.removeNode(ctx, firstInvalid)
+ return s.removeNode(ctx, n)
}
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
-func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
+func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
- if node == nil {
+ if pn == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
}
- if !node.optimistic || node.parent == nil {
+ if !pn.optimistic || pn.node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
-
- children := node.parent.children
+ children := pn.node.parent.children
if len(children) == 1 {
- node.parent.children = []*Node{}
+ pn.node.parent.children = []*Node{}
} else {
for i, n := range children {
- if n == node {
+ if n == pn.node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
- node.parent.children = children[:len(children)-1]
+ pn.node.parent.children = children[:len(children)-1]
break
}
}
}
- return s.removeNodeAndChildren(ctx, node, invalidRoots)
+ return s.removeNodeAndChildren(ctx, pn, invalidRoots)
}
// removeNodeAndChildren removes `node` and all of its descendant from the Store
-func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
+func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
var err error
- for _, child := range node.children {
+ // If we are removing an empty node, then remove the full node as well if it exists.
+ if !pn.full {
+ fn, ok := s.fullNodeByRoot[pn.node.root]
+ if ok {
+ invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
+ if err != nil {
+ return invalidRoots, err
+ }
+ }
+ }
+ // Now remove this node's children.
+ for _, child := range pn.children {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
- if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
+ // We need to remove only the empty node here since the recursion will take care of the full one.
+ en := s.emptyNodeByRoot[child.root]
+ if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
return invalidRoots, err
}
}
- invalidRoots = append(invalidRoots, node.root)
- if node.root == s.proposerBoostRoot {
- s.proposerBoostRoot = [32]byte{}
+ // Only append the root for the empty nodes.
+ if pn.full {
+ delete(s.fullNodeByRoot, pn.node.root)
+ } else {
+ invalidRoots = append(invalidRoots, pn.node.root)
+ if pn.node.root == s.proposerBoostRoot {
+ s.proposerBoostRoot = [32]byte{}
+ }
+ if pn.node.root == s.previousProposerBoostRoot {
+ s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
+ s.previousProposerBoostScore = 0
+ }
+ delete(s.emptyNodeByRoot, pn.node.root)
}
- if node.root == s.previousProposerBoostRoot {
- s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
- s.previousProposerBoostScore = 0
- }
- delete(s.nodeByRoot, node.root)
return invalidRoots, nil
}
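A minimal standalone sketch of the ancestor walk in setOptimisticToInvalid above, with a hypothetical payloadNode type whose parent pointer plays the role of Store.fullParent: climb full ancestors until one matches lastValidHash, treat the node just below it as the first invalid one, and fall back to the offending node alone when the execution layer points at a different fork. It is an illustration of the walk only, not the full resolution of root/parentRoot/parentHash done above.

package main

import "fmt"

// payloadNode is a hypothetical stand-in for the diff's PayloadNode; parent
// points to the nearest *full* ancestor, as Store.fullParent would return.
type payloadNode struct {
	blockHash [32]byte
	parent    *payloadNode
}

// firstInvalid returns the highest ancestor (including start) whose full
// parent carries lastValidHash; everything from that node down is pruned.
func firstInvalid(start *payloadNode, lastValidHash [32]byte) *payloadNode {
	n := start
	for fp := n.parent; fp != nil; fp = fp.parent {
		if fp.blockHash == lastValidHash {
			return n // fp carries the last valid payload, so n is the first invalid node
		}
		n = fp
	}
	// lastValidHash never showed up on this ancestry (the EE pointed at a
	// different fork); fall back to pruning only the offending node.
	return start
}

func main() {
	a := &payloadNode{blockHash: [32]byte{'A'}}
	b := &payloadNode{blockHash: [32]byte{'B'}, parent: a}
	c := &payloadNode{blockHash: [32]byte{'C'}, parent: b}
	fmt.Printf("%c\n", firstInvalid(c, [32]byte{'A'}).blockHash[0]) // B
}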
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go
index 3bc6f44b70..386b43f6a8 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go
@@ -23,93 +23,35 @@ import (
// And every block in the Fork choice is optimistic.
func TestPruneInvalid(t *testing.T) {
tests := []struct {
+ name string
root [32]byte // the root of the new INVALID block
parentRoot [32]byte // the root of the parent block
- payload [32]byte // the last valid hash
+ parentHash [32]byte // the execution hash of the parent block
+ lastValidHash [32]byte // the last valid execution hash
wantedNodeNumber int
wantedRoots [][32]byte
wantedErr error
}{
{ // Bogus LVH, root not in forkchoice
- [32]byte{'x'},
- [32]byte{'i'},
- [32]byte{'R'},
- 13,
- [][32]byte{},
- nil,
+ name: "bogus LVH not in forkchoice",
+ root: [32]byte{'x'}, parentRoot: [32]byte{'i'}, parentHash: [32]byte{'I'}, lastValidHash: [32]byte{'R'},
+ wantedNodeNumber: 13, wantedRoots: [][32]byte{},
+ },
+ { // Bogus LVH
+ name: "bogus LVH",
+ root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'R'},
+ wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
- // Bogus LVH
- [32]byte{'i'},
- [32]byte{'h'},
- [32]byte{'R'},
- 12,
- [][32]byte{{'i'}},
- nil,
+ name: "wanted j",
+ root: [32]byte{'j'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
+ wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
- [32]byte{'j'},
- [32]byte{'b'},
- [32]byte{'B'},
- 12,
- [][32]byte{{'j'}},
- nil,
- },
- {
- [32]byte{'c'},
- [32]byte{'b'},
- [32]byte{'B'},
- 4,
- [][32]byte{{'f'}, {'e'}, {'i'}, {'h'}, {'l'},
- {'k'}, {'g'}, {'d'}, {'c'}},
- nil,
- },
- {
- [32]byte{'i'},
- [32]byte{'h'},
- [32]byte{'H'},
- 12,
- [][32]byte{{'i'}},
- nil,
- },
- {
- [32]byte{'h'},
- [32]byte{'g'},
- [32]byte{'G'},
- 11,
- [][32]byte{{'i'}, {'h'}},
- nil,
- },
- {
- [32]byte{'g'},
- [32]byte{'d'},
- [32]byte{'D'},
- 8,
- [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
- nil,
- },
- {
- [32]byte{'i'},
- [32]byte{'h'},
- [32]byte{'D'},
- 8,
- [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
- nil,
- },
- {
- [32]byte{'f'},
- [32]byte{'e'},
- [32]byte{'D'},
- 11,
- [][32]byte{{'f'}, {'e'}},
- nil,
- },
- {
- [32]byte{'h'},
- [32]byte{'g'},
- [32]byte{'C'},
- 5,
- [][32]byte{
+ name: "wanted 5",
+ root: [32]byte{'c'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
+ wantedNodeNumber: 5,
+ wantedRoots: [][32]byte{
{'f'},
{'e'},
{'i'},
@@ -119,106 +61,118 @@ func TestPruneInvalid(t *testing.T) {
{'g'},
{'d'},
},
- nil,
},
{
- [32]byte{'g'},
- [32]byte{'d'},
- [32]byte{'E'},
- 8,
- [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
- nil,
+ name: "wanted i",
+ root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'H'},
+ wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
- [32]byte{'z'},
- [32]byte{'j'},
- [32]byte{'B'},
- 12,
- [][32]byte{{'j'}},
- nil,
+ name: "wanted i and h",
+ root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'G'},
+ wantedNodeNumber: 12, wantedRoots: [][32]byte{{'i'}},
},
{
- [32]byte{'z'},
- [32]byte{'j'},
- [32]byte{'J'},
- 13,
- [][32]byte{},
- nil,
+ name: "wanted i--g",
+ root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'D'},
+ wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
},
{
- [32]byte{'j'},
- [32]byte{'a'},
- [32]byte{'B'},
- 0,
- [][32]byte{},
- errInvalidParentRoot,
+ name: "wanted 9",
+ root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
+ wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
},
{
- [32]byte{'z'},
- [32]byte{'h'},
- [32]byte{'D'},
- 8,
- [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
- nil,
+ name: "wanted f and e",
+ root: [32]byte{'f'}, parentRoot: [32]byte{'e'}, parentHash: [32]byte{'E'}, lastValidHash: [32]byte{'D'},
+ wantedNodeNumber: 12, wantedRoots: [][32]byte{{'f'}},
},
{
- [32]byte{'z'},
- [32]byte{'h'},
- [32]byte{'D'},
- 8,
- [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
- nil,
+ name: "wanted 6",
+ root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'C'},
+ wantedNodeNumber: 6,
+ wantedRoots: [][32]byte{
+ {'f'}, {'e'}, {'i'}, {'h'}, {'l'}, {'k'}, {'g'},
+ },
+ },
+ {
+ name: "wanted 9 again",
+ root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'E'},
+ wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
+ },
+ {
+ name: "wanted 13",
+ root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'B'},
+ wantedNodeNumber: 13, wantedRoots: [][32]byte{},
+ },
+ {
+ name: "wanted empty",
+ root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'J'},
+ wantedNodeNumber: 13, wantedRoots: [][32]byte{},
+ },
+ {
+ name: "errInvalidParentRoot",
+ root: [32]byte{'j'}, parentRoot: [32]byte{'a'}, parentHash: [32]byte{'A'}, lastValidHash: [32]byte{'B'},
+ wantedErr: errInvalidParentRoot,
+ },
+ {
+ name: "root z",
+ root: [32]byte{'z'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
+ wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
},
}
for _, tc := range tests {
- ctx := t.Context()
- f := setup(1, 1)
-
- state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-
- roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.payload)
- if tc.wantedErr == nil {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := t.Context()
+ f := setup(1, 1)
+ require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{}))
+ state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
- require.DeepEqual(t, tc.wantedRoots, roots)
- require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
- } else {
- require.ErrorIs(t, tc.wantedErr, err)
- }
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+
+ roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.parentHash, tc.lastValidHash)
+ if tc.wantedErr == nil {
+ require.NoError(t, err)
+ require.Equal(t, len(tc.wantedRoots), len(roots))
+ require.DeepEqual(t, tc.wantedRoots, roots)
+ require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
+ } else {
+ require.ErrorIs(t, tc.wantedErr, err)
+ }
+ })
}
}
@@ -240,11 +194,40 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
- _, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
+ _, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'B'}, [32]byte{'A'})
require.NoError(t, err)
+ // the previous proposer boost to b is preserved because b's empty node is not removed
+ require.Equal(t, uint64(10), f.store.previousProposerBoostScore)
+ require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
+ require.Equal(t, [32]byte{'b'}, f.store.previousProposerBoostRoot)
+}
+
+func TestSetOptimisticToInvalid_ProposerBoost_Older(t *testing.T) {
+ ctx := t.Context()
+ f := setup(1, 1)
+
+ state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
+ require.NoError(t, err)
+ require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+ f.store.proposerBoostRoot = [32]byte{'d'}
+ f.store.previousProposerBoostScore = 10
+ f.store.previousProposerBoostRoot = [32]byte{'c'}
+
+ _, err = f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'C'}, [32]byte{'A'})
+ require.NoError(t, err)
+ // both the current and previous proposer boosts are cleared because c and d were pruned
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
- require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
- require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
+ require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
+ require.Equal(t, [32]byte{}, f.store.previousProposerBoostRoot)
}
// This is a regression test (10565)
@@ -272,10 +255,9 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- _, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
+ _, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{'A'})
require.NoError(t, err)
- require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
-
+ require.Equal(t, 2, len(f.store.fullNodeByRoot[[32]byte{'a'}].children))
}
// Pow | Pos
@@ -322,13 +304,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
- roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
+ roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
require.NoError(t, err)
- require.Equal(t, 4, len(roots))
+ require.Equal(t, 3, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
- require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
+ require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
}
// Pow | Pos
@@ -375,13 +357,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
- roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
+ roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
require.NoError(t, err)
- require.Equal(t, 4, len(roots))
+ require.Equal(t, 3, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
- require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
+ require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
}
func TestSetOptimisticToValid(t *testing.T) {
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost.go b/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost.go
index 41b20185a0..c1707dda50 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost.go
@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
s := f.store
proposerScore := uint64(0)
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
- previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
+ previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
} else {
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
- currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
+ currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
} else {
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go b/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go
index 80f88ac8c6..b61f3cae53 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go
@@ -166,14 +166,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// (1: 48) -> (2: 38) -> (3: 10)
// \--------------->(4: 18)
//
- node1 := f.store.nodeByRoot[indexToHash(1)]
- require.Equal(t, node1.weight, uint64(48))
- node2 := f.store.nodeByRoot[indexToHash(2)]
- require.Equal(t, node2.weight, uint64(38))
- node3 := f.store.nodeByRoot[indexToHash(3)]
- require.Equal(t, node3.weight, uint64(10))
- node4 := f.store.nodeByRoot[indexToHash(4)]
- require.Equal(t, node4.weight, uint64(18))
+ node1 := f.store.emptyNodeByRoot[indexToHash(1)]
+ require.Equal(t, node1.node.weight, uint64(48))
+ node2 := f.store.emptyNodeByRoot[indexToHash(2)]
+ require.Equal(t, node2.node.weight, uint64(38))
+ node3 := f.store.emptyNodeByRoot[indexToHash(3)]
+ require.Equal(t, node3.node.weight, uint64(10))
+ node4 := f.store.emptyNodeByRoot[indexToHash(4)]
+ require.Equal(t, node4.node.weight, uint64(18))
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks.go b/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks.go
index 26e9a6c5bd..2054045206 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks.go
@@ -34,22 +34,23 @@ const orphanLateBlockProposingEarly = 2
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
override = false
- // We only need to override FCU if our current head is from the current
+ // We only need to override FCU if our current consensusHead is from the current
// slot. This differs from the spec implementation in that we assume
// that we will call this function in the previous slot to proposing.
- head := f.store.headNode
- if head == nil {
+ consensusHead := f.store.headNode
+ if consensusHead == nil {
return
}
- if head.slot != slots.CurrentSlot(f.store.genesisTime) {
+ if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
return
}
// Do not reorg on epoch boundaries
- if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
+ if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return
}
+ head := f.store.choosePayloadContent(consensusHead)
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
@@ -61,15 +62,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
- if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
+ if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return
}
// Only orphan a single block
- parent := head.parent
+ parent := consensusHead.parent
if parent == nil {
return
}
- if head.slot > parent.slot+1 {
+ if consensusHead.slot > parent.node.slot+1 {
return
}
// Do not orphan a block that has higher justification than the parent
@@ -78,12 +79,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// }
// Only orphan a block if the head LMD vote is weak
- if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
+ if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return
}
// Return early if we are checking before 10 seconds into the slot
- sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
+ sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check current slot")
return true
@@ -92,7 +93,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
return true
}
// Only orphan a block if the parent LMD vote is strong
- if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
+ if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
}
return true
@@ -106,60 +107,61 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
- head := f.store.headNode
- if head == nil {
+ consensusHead := f.store.headNode
+ if consensusHead == nil {
return [32]byte{}
}
// Only reorg blocks from the previous slot.
currentSlot := slots.CurrentSlot(f.store.genesisTime)
- if head.slot+1 != currentSlot {
- return head.root
+ if consensusHead.slot+1 != currentSlot {
+ return consensusHead.root
}
// Do not reorg on epoch boundaries
- if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
- return head.root
+ if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
+ return consensusHead.root
}
// Only reorg blocks that arrive late
+ head := f.store.choosePayloadContent(consensusHead)
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("could not check if block arrived early")
- return head.root
+ return consensusHead.root
}
if early {
- return head.root
+ return consensusHead.root
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
- if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
- return head.root
+ if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
+ return consensusHead.root
}
// Only orphan a single block
- parent := head.parent
+ parent := consensusHead.parent
if parent == nil {
- return head.root
+ return consensusHead.root
}
- if head.slot > parent.slot+1 {
- return head.root
+ if consensusHead.slot > parent.node.slot+1 {
+ return consensusHead.root
}
// Only orphan a block if the head LMD vote is weak
- if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
- return head.root
+ if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
+ return consensusHead.root
}
// Only orphan a block if the parent LMD vote is strong
- if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
- return head.root
+ if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
+ return consensusHead.root
}
// Only reorg if we are proposing early
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check if proposing early")
- return head.root
+ return consensusHead.root
}
if sss >= orphanLateBlockProposingEarly*time.Second {
- return head.root
+ return consensusHead.root
}
- return parent.root
+ return parent.node.root
}
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go b/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go
index 34c10cf9d6..4220370d66 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go
@@ -38,7 +38,6 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, blk.Root(), headRoot)
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, true, f.ShouldOverrideFCU())
-
})
t.Run("head is nil", func(t *testing.T) {
saved := f.store.headNode
@@ -60,10 +59,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.headNode.slot = saved
})
t.Run("head is early", func(t *testing.T) {
- saved := f.store.headNode.timestamp
- f.store.headNode.timestamp = saved.Add(-2 * time.Second)
+ fn := f.store.fullNodeByRoot[f.store.headNode.root]
+ saved := fn.timestamp
+ fn.timestamp = saved.Add(-2 * time.Second)
require.Equal(t, false, f.ShouldOverrideFCU())
- f.store.headNode.timestamp = saved
+ fn.timestamp = saved
})
t.Run("chain not finalizing", func(t *testing.T) {
saved := f.store.headNode.slot
@@ -74,10 +74,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
})
t.Run("Not single block reorg", func(t *testing.T) {
- saved := f.store.headNode.parent.slot
- f.store.headNode.parent.slot = 0
+ saved := f.store.headNode.parent.node.slot
+ f.store.headNode.parent.node.slot = 0
require.Equal(t, false, f.ShouldOverrideFCU())
- f.store.headNode.parent.slot = saved
+ f.store.headNode.parent.node.slot = saved
})
t.Run("parent is nil", func(t *testing.T) {
saved := f.store.headNode.parent
@@ -86,17 +86,17 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.headNode.parent = saved
})
t.Run("parent is weak early call", func(t *testing.T) {
- saved := f.store.headNode.parent.weight
- f.store.headNode.parent.weight = 0
+ saved := f.store.headNode.parent.node.weight
+ f.store.headNode.parent.node.weight = 0
require.Equal(t, true, f.ShouldOverrideFCU())
- f.store.headNode.parent.weight = saved
+ f.store.headNode.parent.node.weight = saved
})
t.Run("parent is weak late call", func(t *testing.T) {
- saved := f.store.headNode.parent.weight
+ saved := f.store.headNode.parent.node.weight
driftGenesisTime(f, 2, 11*time.Second)
- f.store.headNode.parent.weight = 0
+ f.store.headNode.parent.node.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
- f.store.headNode.parent.weight = saved
+ f.store.headNode.parent.node.weight = saved
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
})
t.Run("Head is strong", func(t *testing.T) {
@@ -135,7 +135,8 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
- f.store.headNode.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
+ fn := f.store.fullNodeByRoot[f.store.headNode.root]
+ fn.timestamp = fn.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, parentRoot, f.GetProposerHead())
})
@@ -159,11 +160,12 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
f.store.headNode.slot = saved
})
t.Run("head is early", func(t *testing.T) {
- saved := f.store.headNode.timestamp
+ fn := f.store.fullNodeByRoot[f.store.headNode.root]
+ saved := fn.timestamp
headTimeStamp := f.store.genesisTime.Add(time.Duration(uint64(f.store.headNode.slot)*params.BeaconConfig().SecondsPerSlot+1) * time.Second)
- f.store.headNode.timestamp = headTimeStamp
+ fn.timestamp = headTimeStamp
require.Equal(t, childRoot, f.GetProposerHead())
- f.store.headNode.timestamp = saved
+ fn.timestamp = saved
})
t.Run("chain not finalizing", func(t *testing.T) {
saved := f.store.headNode.slot
@@ -174,10 +176,10 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
driftGenesisTime(f, 3, 1*time.Second)
})
t.Run("Not single block reorg", func(t *testing.T) {
- saved := f.store.headNode.parent.slot
- f.store.headNode.parent.slot = 0
+ saved := f.store.headNode.parent.node.slot
+ f.store.headNode.parent.node.slot = 0
require.Equal(t, childRoot, f.GetProposerHead())
- f.store.headNode.parent.slot = saved
+ f.store.headNode.parent.node.slot = saved
})
t.Run("parent is nil", func(t *testing.T) {
saved := f.store.headNode.parent
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/store.go b/beacon-chain/forkchoice/doubly-linked-tree/store.go
index 95651a35f9..33836db5cf 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/store.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/store.go
@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// head starts from justified root and then follows the best descendant links
@@ -26,13 +27,16 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}
// JustifiedRoot has to be known
- justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
- if !ok || justifiedNode == nil {
+ var jn *Node
+ ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
+ if ej != nil {
+ jn = ej.node
+ } else {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
- justifiedNode = s.treeRootNode
+ jn = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
@@ -40,9 +44,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// If the justified node doesn't have a best descendant,
// the best node is itself.
- bestDescendant := justifiedNode.bestDescendant
+ bestDescendant := jn.bestDescendant
if bestDescendant == nil {
- bestDescendant = justifiedNode
+ bestDescendant = jn
}
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
@@ -66,29 +70,42 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
roblock consensus_blocks.ROBlock,
- justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
+ justifiedEpoch, finalizedEpoch primitives.Epoch,
+) (*PayloadNode, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
root := roblock.Root()
- block := roblock.Block()
- slot := block.Slot()
- parentRoot := block.ParentRoot()
- var payloadHash [32]byte
- if block.Version() >= version.Bellatrix {
- execution, err := block.Body().Execution()
- if err != nil {
- return nil, err
- }
- copy(payloadHash[:], execution.BlockHash())
- }
-
// Return if the block has been inserted into Store before.
- if n, ok := s.nodeByRoot[root]; ok {
+ if n, ok := s.emptyNodeByRoot[root]; ok {
return n, nil
}
- parent := s.nodeByRoot[parentRoot]
+ block := roblock.Block()
+ slot := block.Slot()
+ var parent *PayloadNode
+ blockHash := &[32]byte{}
+ if block.Version() >= version.Gloas {
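+ // resolveParentPayloadStatus fills in the parent payload node and the block hash for Gloas blocks.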
+ if err := s.resolveParentPayloadStatus(block, &parent, blockHash); err != nil {
+ return nil, err
+ }
+ } else {
+ if block.Version() >= version.Bellatrix {
+ execution, err := block.Body().Execution()
+ if err != nil {
+ return nil, err
+ }
+ copy(blockHash[:], execution.BlockHash())
+ }
+ parentRoot := block.ParentRoot()
+ en := s.emptyNodeByRoot[parentRoot]
+ parent = s.fullNodeByRoot[parentRoot]
+ if parent == nil && en != nil {
+ // Pre-Gloas, only full parents are allowed.
+ return nil, errInvalidParentRoot
+ }
+ }
+
n := &Node{
slot: slot,
root: root,
@@ -97,30 +114,51 @@ func (s *Store) insert(ctx context.Context,
unrealizedJustifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
unrealizedFinalizedEpoch: finalizedEpoch,
- optimistic: true,
- payloadHash: payloadHash,
- timestamp: time.Now(),
+ blockHash: *blockHash,
}
-
// Set the node's target checkpoint
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
n.target = n
} else if parent != nil {
- if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
- n.target = parent.target
+ if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
+ n.target = parent.node.target
} else {
- n.target = parent
+ n.target = parent.node
}
}
+ var ret *PayloadNode
+ optimistic := true
+ if parent != nil {
+ optimistic = n.parent.optimistic
+ }
+ // Make the empty node. Its optimistic status equals its parent's status.
+ pn := &PayloadNode{
+ node: n,
+ optimistic: optimistic,
+ timestamp: time.Now(),
+ }
+ s.emptyNodeByRoot[root] = pn
+ ret = pn
+ if block.Version() < version.Gloas {
+ // Also make the full node; it remains optimistic until the engine validates the execution payload.
+ fn := &PayloadNode{
+ node: n,
+ optimistic: true,
+ timestamp: time.Now(),
+ full: true,
+ }
+ ret = fn
+ s.fullNodeByRoot[root] = fn
+ }
- s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
s.highestReceivedNode = n
} else {
- delete(s.nodeByRoot, root)
+ delete(s.emptyNodeByRoot, root)
+ delete(s.fullNodeByRoot, root)
return nil, errInvalidParentRoot
}
} else {
@@ -128,7 +166,7 @@ func (s *Store) insert(ctx context.Context,
// Apply proposer boost
now := time.Now()
if now.Before(s.genesisTime) {
- return n, nil
+ return ret, nil
}
currentSlot := slots.CurrentSlot(s.genesisTime)
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
@@ -144,17 +182,16 @@ func (s *Store) insert(ctx context.Context,
// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
- if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
- _, remErr := s.removeNode(ctx, n)
- if remErr != nil {
- log.WithError(remErr).Error("could not remove node")
- }
- return nil, errors.Wrap(err, "could not update best descendants")
+ if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
+ log.WithError(err).WithFields(logrus.Fields{
+ "slot": slot,
+ "root": root,
+ }).Error("Could not update best descendant")
}
}
// Update metrics.
processedBlockCount.Inc()
- nodeCount.Set(float64(len(s.nodeByRoot)))
+ nodeCount.Set(float64(len(s.emptyNodeByRoot)))
// Only update received block slot if it's within epoch from current time.
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
@@ -165,10 +202,10 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
}
- return n, nil
+ return ret, nil
}
-// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
+// pruneFinalizedNodeByRootMap prunes the `emptyNodeByRoot` and `fullNodeByRoot` maps
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
@@ -181,44 +218,51 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
}
return nil
}
- for _, child := range node.children {
+ for _, child := range s.allConsensusChildren(node) {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return err
}
}
-
- node.children = nil
- delete(s.nodeByRoot, node.root)
+ en := s.emptyNodeByRoot[node.root]
+ en.children = nil
+ delete(s.emptyNodeByRoot, node.root)
+ fn := s.fullNodeByRoot[node.root]
+ if fn != nil {
+ fn.children = nil
+ delete(s.fullNodeByRoot, node.root)
+ }
return nil
}
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
+// TODO: Gloas: to ensure that chains up to a full node are still found, we may want to prune only up to the latest finalized full block
func (s *Store) prune(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
finalizedRoot := s.finalizedCheckpoint.Root
finalizedEpoch := s.finalizedCheckpoint.Epoch
- finalizedNode, ok := s.nodeByRoot[finalizedRoot]
- if !ok || finalizedNode == nil {
+ fen, ok := s.emptyNodeByRoot[finalizedRoot]
+ if !ok || fen == nil {
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
}
+ fn := fen.node
// return early if we haven't changed the finalized checkpoint
- if finalizedNode.parent == nil {
+ if fn.parent == nil {
return nil
}
// Save the new finalized dependent root because it will be pruned
- s.finalizedDependentRoot = finalizedNode.parent.root
+ s.finalizedDependentRoot = fn.parent.node.root
// Prune nodeByRoot starting from root
- if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
+ if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
return err
}
- finalizedNode.parent = nil
- s.treeRootNode = finalizedNode
+ fn.parent = nil
+ s.treeRootNode = fn
prunedCount.Inc()
// Prune all children of the finalized checkpoint block that are incompatible with it
@@ -226,13 +270,13 @@ func (s *Store) prune(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not compute epoch start")
}
- if finalizedNode.slot == checkpointMaxSlot {
+ if fn.slot == checkpointMaxSlot {
return nil
}
- for _, child := range finalizedNode.children {
+ for _, child := range fen.children {
if child != nil && child.slot <= checkpointMaxSlot {
- if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
+ if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
return errors.Wrap(err, "could not prune incompatible finalized child")
}
}
@@ -246,10 +290,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
var roots [][32]byte
var slots []primitives.Slot
- for root, node := range s.nodeByRoot {
- if len(node.children) == 0 {
+ for root, n := range s.emptyNodeByRoot {
+ if len(s.allConsensusChildren(n.node)) == 0 {
roots = append(roots, root)
- slots = append(slots, node.slot)
+ slots = append(slots, n.node.slot)
}
}
return roots, slots
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go
index 2bbbda9071..b975d93616 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go
@@ -1,7 +1,6 @@
package doublylinkedtree
import (
- "context"
"testing"
"time"
@@ -41,18 +40,18 @@ func TestStore_NodeByRoot(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- node0 := f.store.treeRootNode
- node1 := node0.children[0]
- node2 := node1.children[0]
+ node0 := f.store.emptyNodeByRoot[params.BeaconConfig().ZeroHash]
+ node1 := f.store.emptyNodeByRoot[indexToHash(1)]
+ node2 := f.store.emptyNodeByRoot[indexToHash(2)]
- expectedRoots := map[[32]byte]*Node{
+ expectedRoots := map[[32]byte]*PayloadNode{
params.BeaconConfig().ZeroHash: node0,
indexToHash(1): node1,
indexToHash(2): node2,
}
require.Equal(t, 3, f.NodeCount())
- for root, node := range f.store.nodeByRoot {
+ for root, node := range f.store.emptyNodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
@@ -111,37 +110,28 @@ func TestStore_Head_BestDescendant(t *testing.T) {
require.Equal(t, h, indexToHash(4))
}
-func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
- ctx, cancel := context.WithCancel(t.Context())
- f := setup(0, 0)
- state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
- require.NoError(t, err)
- require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- cancel()
- state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
- require.NoError(t, err)
- err = f.InsertNode(ctx, state, blkRoot)
- require.ErrorContains(t, "context canceled", err)
-}
-
func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
- nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
+ emptyRootPN := &PayloadNode{node: treeRootNode}
+ fullRootPN := &PayloadNode{node: treeRootNode, full: true, optimistic: true}
+ emptyNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): emptyRootPN}
+ fullNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): fullRootPN}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
- s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
+ s := &Store{emptyNodeByRoot: emptyNodeByRoot, fullNodeByRoot: fullNodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
- assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
- assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
- assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
- assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
- child := treeRootNode.children[0]
+ assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
+ assert.Equal(t, (*PayloadNode)(nil), treeRootNode.parent, "Incorrect parent")
+ children := s.allConsensusChildren(treeRootNode)
+ assert.Equal(t, 1, len(children), "Incorrect children number")
+ assert.Equal(t, payloadHash, children[0].blockHash, "Incorrect payload hash")
+ child := children[0]
assert.Equal(t, primitives.Epoch(1), child.justifiedEpoch, "Incorrect justification")
assert.Equal(t, primitives.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
@@ -166,7 +156,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Finalized root is at index 99 so everything before 99 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(t.Context()))
- assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
+ assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
@@ -188,12 +178,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Finalized root is at index 11 so everything before 11 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(10)
require.NoError(t, s.prune(t.Context()))
- assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
+ assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
// One more time.
s.finalizedCheckpoint.Root = indexToHash(20)
require.NoError(t, s.prune(t.Context()))
- assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
+ assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_ReturnEarly(t *testing.T) {
@@ -236,7 +226,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
- require.Equal(t, len(s.nodeByRoot), 1)
+ require.Equal(t, len(s.emptyNodeByRoot), 1)
}
// This test starts with the following branching diagram
@@ -316,7 +306,7 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
- require.Equal(t, len(s.nodeByRoot), 1)
+ require.Equal(t, len(s.emptyNodeByRoot), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/types.go b/beacon-chain/forkchoice/doubly-linked-tree/types.go
index 4a99ef2a56..66a231746a 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/types.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/types.go
@@ -21,23 +21,26 @@ type ForkChoice struct {
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}
+var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)
+
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
- justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
- unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
- unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
- prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
- finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
- proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
- previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
- previousProposerBoostScore uint64 // previous proposer boosted root score.
- finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
- committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
- treeRootNode *Node // the root node of the store tree.
- headNode *Node // last head Node
- nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
- slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
- originRoot [fieldparams.RootLength]byte // The genesis block root
+ justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
+ unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
+ unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
+ prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
+ finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
+ proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
+ previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
+ previousProposerBoostScore uint64 // previous proposer boosted root score.
+ finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
+ committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
+ treeRootNode *Node // the root node of the store tree.
+ headNode *Node // last head Node
+ emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by roots.
+ fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // full nodes (the payload was present) indexed by beacon block root.
+ slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
+ originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time
highestReceivedNode *Node // The highest slot node.
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
@@ -49,19 +52,28 @@ type Store struct {
type Node struct {
slot primitives.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
- payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
- parent *Node // parent index of this node.
+ blockHash [fieldparams.RootLength]byte // execution block hash of the block converted to the node.
+ parent *PayloadNode // parent payload node of this node.
target *Node // target checkpoint for
- children []*Node // the list of direct children of this Node
+ bestDescendant *Node // bestDescendant node of this node.
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
- bestDescendant *Node // bestDescendant node of this node.
- optimistic bool // whether the block has been fully validated or not
- timestamp time.Time // The timestamp when the node was inserted.
+}
+
+// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty (consensus only) or full (payload present).
+type PayloadNode struct {
+ optimistic bool // whether the block has been fully validated or not
+ full bool // whether this node represents a payload present or not
+ weight uint64 // weight of this node: the total balance including children
+ balance uint64 // the balance that voted for this node directly
+ bestDescendant *Node // bestDescendant node of this payload node.
+ node *Node // the consensus part of this full forkchoice node
+ timestamp time.Time // The timestamp when the node was inserted.
+ children []*Node // the list of direct consensus children of this payload node
}
// Vote defines an individual validator's vote.
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification.go b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification.go
index 0900a61e14..fd95fef2d3 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification.go
@@ -15,33 +15,34 @@ import (
)
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
- node, ok := s.nodeByRoot[root]
- if !ok || node == nil {
+ en, ok := s.emptyNodeByRoot[root]
+ if !ok || en == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
}
- if epoch < node.unrealizedJustifiedEpoch {
+ if epoch < en.node.unrealizedJustifiedEpoch {
return errInvalidUnrealizedJustifiedEpoch
}
- node.unrealizedJustifiedEpoch = epoch
+ en.node.unrealizedJustifiedEpoch = epoch
return nil
}
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
- node, ok := s.nodeByRoot[root]
- if !ok || node == nil {
+ en, ok := s.emptyNodeByRoot[root]
+ if !ok || en == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
}
- if epoch < node.unrealizedFinalizedEpoch {
+ if epoch < en.node.unrealizedFinalizedEpoch {
return errInvalidUnrealizedFinalizedEpoch
}
- node.unrealizedFinalizedEpoch = epoch
+ en.node.unrealizedFinalizedEpoch = epoch
return nil
}
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
- for _, node := range f.store.nodeByRoot {
+ for _, en := range f.store.emptyNodeByRoot {
+ node := en.node
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
@@ -62,16 +63,17 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
if node.parent == nil { // Nothing to do if the parent is nil.
return jc, fc
}
+ pn := node.parent.node
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
stateSlot := state.Slot()
stateEpoch := slots.ToEpoch(stateSlot)
- currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
- prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
+ currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
+ prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
// Exit early if it's justified or too early to be justified.
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
- node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
- node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
+ node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
+ node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
return jc, fc
}
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go
index f8ee9330b0..6256d7496e 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go
@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
- require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
- require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
+ require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
+ require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
- require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
- require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
+ require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
+ require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
@@ -78,9 +78,9 @@ func TestStore_LongFork(t *testing.T) {
// Add an attestation to c, it is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
f.justifiedBalances = []uint64{100}
- c := f.store.nodeByRoot[[32]byte{'c'}]
- require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.slot))
- driftGenesisTime(f, c.slot, 0)
+ c := f.store.emptyNodeByRoot[[32]byte{'c'}]
+ require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.node.slot))
+ driftGenesisTime(f, c.node.slot, 0)
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
- d := f.store.nodeByRoot[[32]byte{'d'}]
- require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.slot))
- driftGenesisTime(f, d.slot, 0)
- require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.slot)))
+ d := f.store.emptyNodeByRoot[[32]byte{'d'}]
+ require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.node.slot))
+ driftGenesisTime(f, d.node.slot, 0)
+ require.Equal(t, true, d.node.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.node.slot)))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
- require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
- require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
+ require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
+ require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
}
// Epoch 1 Epoch 2 Epoch 3
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
- require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
- require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
+ require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
+ require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
driftGenesisTime(f, 99, 0)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
- require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
- require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
+ require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
+ require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
}
func TestStore_PullTips_Heuristics(t *testing.T) {
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
- f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
+ f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 66, 0)
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
- require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
- require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
+ require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
+ require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
- f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
+ f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
- require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
- require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
+ require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
+ require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
- f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
+ f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 127, 0)
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
- require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
+ require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
})
t.Run("Block from previous Epoch", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
- f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
+ f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
- require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
+ require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
})
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
f := setup(1, 1)
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
- require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
+ require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
})
}
diff --git a/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go b/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go
index c3e6e77eac..6d2a975294 100644
--- a/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go
+++ b/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go
@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(t.Context()))
- assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
+ assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
// we pruned artificially the justified root.
f.store.justifiedCheckpoint.Root = indexToHash(5)
diff --git a/beacon-chain/forkchoice/interfaces.go b/beacon-chain/forkchoice/interfaces.go
index 5bcc8e92b4..ce219cd9f9 100644
--- a/beacon-chain/forkchoice/interfaces.go
+++ b/beacon-chain/forkchoice/interfaces.go
@@ -89,7 +89,7 @@ type FastGetter interface {
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
- SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
+ SetOptimisticToInvalid(context.Context, [32]byte, [32]byte, [32]byte, [32]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(time.Time)
diff --git a/beacon-chain/graffiti/graffiti-proposal-brief.md b/beacon-chain/graffiti/graffiti-proposal-brief.md
deleted file mode 100644
index 448ae29d13..0000000000
--- a/beacon-chain/graffiti/graffiti-proposal-brief.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# Graffiti Version Info Implementation
-
-## Summary
-Add automatic EL+CL version info to block graffiti following [ethereum/execution-apis#517](https://github.com/ethereum/execution-apis/pull/517). Uses the [flexible standard](https://hackmd.io/@wmoBhF17RAOH2NZ5bNXJVg/BJX2c9gja) to pack client info into leftover space after user graffiti.
-
-More details: https://github.com/ethereum/execution-apis/blob/main/src/engine/identification.md
-
-## Implementation
-
-### Core Component: GraffitiInfo Struct
-Thread-safe struct holding version information:
-```go
-const clCode = "PR"
-
-type GraffitiInfo struct {
- mu sync.RWMutex
- userGraffiti string // From --graffiti flag (set once at startup)
- clCommit string // From version.GetCommitPrefix() helper function
- elCode string // From engine_getClientVersionV1
- elCommit string // From engine_getClientVersionV1
-}
-```
-
-### Flow
-1. **Startup**: Parse flags, create GraffitiInfo with user graffiti and CL info.
-2. **Wiring**: Pass struct to both execution service and RPC validator server
-3. **Runtime**: Execution service goroutine periodically calls `engine_getClientVersionV1` and updates EL fields
-4. **Block Proposal**: RPC validator server calls `GenerateGraffiti()` to get formatted graffiti
-
-### Flexible Graffiti Format
-Packs as much client info as space allows (after user graffiti):
-
-| Available Space | Format | Example |
-|----------------|--------|---------|
-| ≥12 bytes | `EL(2)+commit(4)+CL(2)+commit(4)+user` | `GE168dPR63afBob` |
-| 8-11 bytes | `EL(2)+commit(2)+CL(2)+commit(2)+user` | `GE16PR63my node here` |
-| 4-7 bytes | `EL(2)+CL(2)+user` | `GEPRthis is my graffiti msg` |
-| 2-3 bytes | `EL(2)+user` | `GEalmost full graffiti message` |
-| <2 bytes | user only | `full 32 byte user graffiti here` |
-
-```go
-func (g *GraffitiInfo) GenerateGraffiti() [32]byte {
- available := 32 - len(userGraffiti)
-
- if elCode == "" {
- elCommit2 = elCommit4 = ""
- }
-
- switch {
- case available >= 12:
- return elCode + elCommit4 + clCode + clCommit4 + userGraffiti
- case available >= 8:
- return elCode + elCommit2 + clCode + clCommit2 + userGraffiti
- case available >= 4:
- return elCode + clCode + userGraffiti
- case available >= 2:
- return elCode + userGraffiti
- default:
- return userGraffiti
- }
-}
-```
-
-### Update Logic
-Single testable function in execution service:
-```go
-func (s *Service) updateGraffitiInfo() {
- versions, err := s.GetClientVersion(ctx)
- if err != nil {
- return // Keep last good value
- }
- if len(versions) == 1 {
- s.graffitiInfo.UpdateFromEngine(versions[0].Code, versions[0].Commit)
- }
-}
-```
-
-Goroutine calls this on `slot % 8 == 4` timing (4 times per epoch, avoids slot boundaries).
-
-### Files Changes Required
-
-**New:**
-- `beacon-chain/execution/graffiti_info.go` - The struct and methods
-- `beacon-chain/execution/graffiti_info_test.go` - Unit tests
-- `runtime/version/version.go` - Add `GetCommitPrefix()` helper that extracts first 4 hex chars from the git commit injected via Bazel ldflags at build time
-
-**Modified:**
-- `beacon-chain/execution/service.go` - Add goroutine + updateGraffitiInfo()
-- `beacon-chain/execution/engine_client.go` - Add GetClientVersion() method that does engine call
-- `beacon-chain/rpc/.../validator/proposer.go` - Call GenerateGraffiti()
-- `beacon-chain/node/node.go` - Wire GraffitiInfo to services
-
-### Testing Strategy
-- Unit test GraffitiInfo methods (priority logic, thread safety)
-- Unit test updateGraffitiInfo() with mocked engine client
diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go
index 7f766f2d8a..c9eb6346d6 100644
--- a/beacon-chain/node/node.go
+++ b/beacon-chain/node/node.go
@@ -787,6 +787,9 @@ func (b *BeaconNode) registerPOWChainService() error {
return err
}
+ // Create GraffitiInfo for client version tracking in block graffiti
+ graffitiInfo := execution.NewGraffitiInfo()
+
// skipcq: CRT-D0001
opts := append(
b.serviceFlagOpts.executionChainFlagOpts,
@@ -799,6 +802,7 @@ func (b *BeaconNode) registerPOWChainService() error {
execution.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
execution.WithJwtId(b.cliCtx.String(flags.JwtId.Name)),
execution.WithVerifierWaiter(b.verifyInitWaiter),
+ execution.WithGraffitiInfo(graffitiInfo),
)
web3Service, err := execution.NewService(b.ctx, opts...)
if err != nil {
@@ -1006,6 +1010,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
PayloadIDCache: b.payloadIDCache,
ExecutionPayloadEnvelopeCache: b.executionPayloadEnvelopeCache,
LCStore: b.lcStore,
+ GraffitiInfo: web3Service.GraffitiInfo(),
})
return b.services.RegisterService(rpcService)
diff --git a/beacon-chain/p2p/gossip_topic_mappings.go b/beacon-chain/p2p/gossip_topic_mappings.go
index 52e097f49d..9be929dd98 100644
--- a/beacon-chain/p2p/gossip_topic_mappings.go
+++ b/beacon-chain/p2p/gossip_topic_mappings.go
@@ -26,6 +26,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientFinalityUpdateTopicFormat: func() proto.Message { return ðpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return ðpb.DataColumnSidecar{} },
PayloadAttestationMessageTopicFormat: func() proto.Message { return ðpb.PayloadAttestationMessage{} },
+ ExecutionPayloadEnvelopeTopicFormat: func() proto.Message { return ðpb.SignedExecutionPayloadEnvelope{} },
}
// GossipTopicMappings is a function to return the assigned data type
diff --git a/beacon-chain/p2p/topics.go b/beacon-chain/p2p/topics.go
index ac9b0ca66b..ce5d4b1220 100644
--- a/beacon-chain/p2p/topics.go
+++ b/beacon-chain/p2p/topics.go
@@ -46,8 +46,10 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
- // GossipPayloadAttestationMessage is the name for the payload attestation message type.
- GossipPayloadAttestationMessage = "payload_attestation_message"
+ // GossipPayloadAttestationMessageMessage is the name for the payload attestation message type.
+ GossipPayloadAttestationMessageMessage = "payload_attestation_message"
+ // GossipExecutionPayloadEnvelopeMessage is the name for the execution payload envelope message type.
+ GossipExecutionPayloadEnvelopeMessage = "execution_payload_envelope"
// Topic Formats
//
@@ -78,7 +80,9 @@ const (
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// PayloadAttestationMessageTopicFormat is the topic format for payload attestation messages.
- PayloadAttestationMessageTopicFormat = GossipProtocolAndDigest + GossipPayloadAttestationMessage
+ PayloadAttestationMessageTopicFormat = GossipProtocolAndDigest + GossipPayloadAttestationMessageMessage
+ // ExecutionPayloadEnvelopeTopicFormat is the topic format for execution payload envelopes.
+ ExecutionPayloadEnvelopeTopicFormat = GossipProtocolAndDigest + GossipExecutionPayloadEnvelopeMessage
)
// topic is a struct representing a single gossipsub topic.
@@ -162,7 +166,8 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
- newTopic(gloas, future, empty, GossipPayloadAttestationMessage),
+ newTopic(gloas, future, empty, GossipPayloadAttestationMessageMessage),
+ newTopic(gloas, future, empty, GossipExecutionPayloadEnvelopeMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}
diff --git a/beacon-chain/rpc/eth/config/handlers_test.go b/beacon-chain/rpc/eth/config/handlers_test.go
index edb02c7310..382b652258 100644
--- a/beacon-chain/rpc/eth/config/handlers_test.go
+++ b/beacon-chain/rpc/eth/config/handlers_test.go
@@ -86,6 +86,7 @@ func TestGetSpec(t *testing.T) {
config.GloasForkEpoch = 110
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
+ config.BuilderWithdrawalPrefixByte = byte('e')
config.GenesisDelay = 24
config.SecondsPerSlot = 25
config.SlotDurationMilliseconds = 120
diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go
index ce9de50235..80098e3cd8 100644
--- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go
+++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go
@@ -89,7 +89,13 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
// Set slot, graffiti, randao reveal, and parent root.
sBlk.SetSlot(req.Slot)
- sBlk.SetGraffiti(req.Graffiti)
+ // Generate graffiti with client version info using the flexible graffiti standard
+ if vs.GraffitiInfo != nil {
+ graffiti := vs.GraffitiInfo.GenerateGraffiti(req.Graffiti)
+ sBlk.SetGraffiti(graffiti[:])
+ } else {
+ sBlk.SetGraffiti(req.Graffiti)
+ }
sBlk.SetRandaoReveal(req.RandaoReveal)
sBlk.SetParentRoot(parentRoot[:])
diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/server.go b/beacon-chain/rpc/prysm/v1alpha1/validator/server.go
index ceb91702f1..e57be617ef 100644
--- a/beacon-chain/rpc/prysm/v1alpha1/validator/server.go
+++ b/beacon-chain/rpc/prysm/v1alpha1/validator/server.go
@@ -84,6 +84,7 @@ type Server struct {
ClockWaiter startup.ClockWaiter
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
+ GraffitiInfo *execution.GraffitiInfo
}
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go
index a058e67ad3..4a4a21f913 100644
--- a/beacon-chain/rpc/service.go
+++ b/beacon-chain/rpc/service.go
@@ -126,6 +126,7 @@ type Config struct {
PayloadIDCache *cache.PayloadIDCache
ExecutionPayloadEnvelopeCache *cache.ExecutionPayloadEnvelopeCache
LCStore *lightClient.Store
+ GraffitiInfo *execution.GraffitiInfo
}
// NewService instantiates a new RPC service instance that will
@@ -258,6 +259,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
PayloadIDCache: s.cfg.PayloadIDCache,
AttestationStateFetcher: s.cfg.AttestationReceiver,
ExecutionPayloadEnvelopeCache: s.cfg.ExecutionPayloadEnvelopeCache,
+ GraffitiInfo: s.cfg.GraffitiInfo,
}
s.validatorServer = validatorServer
nodeServer := &nodev1alpha1.Server{
diff --git a/beacon-chain/state/interfaces_gloas.go b/beacon-chain/state/interfaces_gloas.go
index ea03c663ff..8bd2d87505 100644
--- a/beacon-chain/state/interfaces_gloas.go
+++ b/beacon-chain/state/interfaces_gloas.go
@@ -1,24 +1,55 @@
package state
import (
+ fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
type writeOnlyGloasFields interface {
+ // Bids.
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
+
+ // Builder pending payments / withdrawals.
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
ClearBuilderPendingPayment(index primitives.Slot) error
+ QueueBuilderPayment() error
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
+
+ // Execution payload availability.
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
+
+ // Misc.
+ SetLatestBlockHash(hash [32]byte) error
+ SetExecutionPayloadAvailability(index primitives.Slot, available bool) error
+
+ // Builders.
+ IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error
+ AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error
+ UpdatePendingPaymentWeight(att ethpb.Att, indices []uint64, participatedFlags map[uint8]bool) error
}
type readOnlyGloasFields interface {
+ // Bids.
+ LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error)
+
+ // Builder pending payments / withdrawals.
+ BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
+ WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error)
+
+ // Misc.
+ LatestBlockHash() ([32]byte, error)
+
+ // Builders.
+ Builder(index primitives.BuilderIndex) (*ethpb.Builder, error)
BuilderPubkey(primitives.BuilderIndex) ([48]byte, error)
+ BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool)
IsActiveBuilder(primitives.BuilderIndex) (bool, error)
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
- LatestBlockHash() ([32]byte, error)
- BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
+ IsAttestationSameSlot(blockRoot [32]byte, slot primitives.Slot) (bool, error)
+ BuilderPendingPayment(index uint64) (*ethpb.BuilderPendingPayment, error)
+ ExecutionPayloadAvailability(slot primitives.Slot) (uint64, error)
}
diff --git a/beacon-chain/state/state-native/getters_gloas.go b/beacon-chain/state/state-native/getters_gloas.go
index 8a7ab2f12b..7224cc4336 100644
--- a/beacon-chain/state/state-native/getters_gloas.go
+++ b/beacon-chain/state/state-native/getters_gloas.go
@@ -1,13 +1,19 @@
package state_native
import (
+ "bytes"
"fmt"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
+ "github.com/pkg/errors"
)
// LatestBlockHash returns the hash of the latest execution block.
@@ -26,6 +32,45 @@ func (b *BeaconState) LatestBlockHash() ([32]byte, error) {
return [32]byte(b.latestBlockHash), nil
}
+// IsAttestationSameSlot checks whether the attestation's beacon block root was proposed in the attestation's own slot.
+// Spec v1.7.0-alpha pseudocode:
+//
+// is_attestation_same_slot(state, data):
+// if data.slot == 0:
+// return True
+//
+// blockroot = data.beacon_block_root
+// slot_blockroot = get_block_root_at_slot(state, data.slot)
+// prev_blockroot = get_block_root_at_slot(state, Slot(data.slot - 1))
+//
+// return blockroot == slot_blockroot and blockroot != prev_blockroot
+func (b *BeaconState) IsAttestationSameSlot(blockRoot [32]byte, slot primitives.Slot) (bool, error) {
+ if b.version < version.Gloas {
+ return false, errNotSupported("IsAttestationSameSlot", b.version)
+ }
+
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ if slot == 0 {
+ return true, nil
+ }
+
+ blockRootAtSlot, err := helpers.BlockRootAtSlot(b, slot)
+ if err != nil {
+ return false, errors.Wrapf(err, "block root at slot %d", slot)
+ }
+ matchingBlockRoot := bytes.Equal(blockRoot[:], blockRootAtSlot)
+
+ blockRootAtPrevSlot, err := helpers.BlockRootAtSlot(b, slot-1)
+ if err != nil {
+ return false, errors.Wrapf(err, "block root at slot %d", slot-1)
+ }
+ matchingPrevBlockRoot := bytes.Equal(blockRoot[:], blockRootAtPrevSlot)
+
+ return matchingBlockRoot && !matchingPrevBlockRoot, nil
+}
+
// BuilderPubkey returns the builder pubkey at the provided index.
func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
if b.version < version.Gloas {
@@ -156,3 +201,116 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
return b.builderPendingPaymentsVal(), nil
}
+
+// BuilderPendingPayment returns the builder pending payment for the given index.
+func (b *BeaconState) BuilderPendingPayment(index uint64) (*ethpb.BuilderPendingPayment, error) {
+ if b.version < version.Gloas {
+ return nil, errNotSupported("BuilderPendingPayment", b.version)
+ }
+
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ if index >= uint64(len(b.builderPendingPayments)) {
+ return nil, fmt.Errorf("builder pending payment index %d out of range (len=%d)", index, len(b.builderPendingPayments))
+ }
+ return ethpb.CopyBuilderPendingPayment(b.builderPendingPayments[index]), nil
+}
+
+// LatestExecutionPayloadBid returns the cached latest execution payload bid for Gloas.
+func (b *BeaconState) LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error) {
+ if b.version < version.Gloas {
+ return nil, errNotSupported("LatestExecutionPayloadBid", b.version)
+ }
+
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ if b.latestExecutionPayloadBid == nil {
+ return nil, nil
+ }
+
+ return blocks.WrappedROExecutionPayloadBid(b.latestExecutionPayloadBid.Copy())
+}
+
+// WithdrawalsMatchPayloadExpected returns true if the given withdrawals match the state's
+// payload_expected_withdrawals list, element by element.
+func (b *BeaconState) WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error) {
+ if b.version < version.Gloas {
+ return false, errNotSupported("WithdrawalsMatchPayloadExpected", b.version)
+ }
+
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ return withdrawalsEqual(withdrawals, b.payloadExpectedWithdrawals), nil
+}
+
+func withdrawalsEqual(a, b []*enginev1.Withdrawal) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ wa := a[i]
+ wb := b[i]
+ if wa.Index != wb.Index ||
+ wa.ValidatorIndex != wb.ValidatorIndex ||
+ wa.Amount != wb.Amount ||
+ !bytes.Equal(wa.Address, wb.Address) {
+ return false
+ }
+ }
+ return true
+}
+
+// ExecutionPayloadAvailability returns the execution payload availability bit for the given slot.
+func (b *BeaconState) ExecutionPayloadAvailability(slot primitives.Slot) (uint64, error) {
+ if b.version < version.Gloas {
+ return 0, errNotSupported("ExecutionPayloadAvailability", b.version)
+ }
+
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ slotIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
+ byteIndex := slotIndex / 8
+ bitIndex := slotIndex % 8
+
+ if uint64(byteIndex) >= uint64(len(b.executionPayloadAvailability)) {
+ return 0, fmt.Errorf("byte index %d out of range for execution payload availability length %d", byteIndex, len(b.executionPayloadAvailability))
+ }
+ bit := (b.executionPayloadAvailability[byteIndex] >> bitIndex) & 1
+
+ return uint64(bit), nil
+}
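The getter maps a slot to a (byte, bit) position in the fixed-size availability bitvector. A worked example, assuming the mainnet SlotsPerHistoricalRoot value of 8192:

```go
package main

import "fmt"

func main() {
	const slotsPerHistoricalRoot = 8192 // mainnet value, assumed here for illustration
	slot := uint64(9)

	slotIndex := slot % slotsPerHistoricalRoot // 9
	byteIndex := slotIndex / 8                 // 1
	bitIndex := slotIndex % 8                  // 1

	availability := make([]byte, slotsPerHistoricalRoot/8)
	availability[1] = 0b0000_0010 // mark slot 9 as available

	bit := (availability[byteIndex] >> bitIndex) & 1
	fmt.Println(bit) // 1
}
```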
+
+// Builder returns the builder at the given index.
+func (b *BeaconState) Builder(index primitives.BuilderIndex) (*ethpb.Builder, error) {
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ if b.builders == nil {
+ return nil, nil
+ }
+ if uint64(index) >= uint64(len(b.builders)) {
+ return nil, fmt.Errorf("builder index %d out of bounds", index)
+ }
+ if b.builders[index] == nil {
+ return nil, nil
+ }
+
+ return ethpb.CopyBuilder(b.builders[index]), nil
+}
+
+// BuilderIndexByPubkey returns the builder index for the given pubkey, if present.
+func (b *BeaconState) BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool) {
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ for i, builder := range b.builders {
+ if builder == nil {
+ continue
+ }
+ if bytes.Equal(builder.Pubkey, pubkey[:]) {
+ return primitives.BuilderIndex(i), true
+ }
+ }
+ return 0, false
+}
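A short usage sketch composing the getters added above. The helper name is hypothetical and not part of this change; it only shows the intended call pattern:

```go
package state_native

import (
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// builderBalanceByPubkey is a hypothetical helper composing BuilderIndexByPubkey
// and Builder; it illustrates the intended call pattern only.
func builderBalanceByPubkey(b *BeaconState, pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.Gwei, bool) {
	idx, ok := b.BuilderIndexByPubkey(pubkey)
	if !ok {
		return 0, false
	}
	builder, err := b.Builder(idx)
	if err != nil || builder == nil {
		return 0, false
	}
	// Builder returns a defensive copy, so reading from it never races with the state.
	return builder.Balance, true
}
```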
diff --git a/beacon-chain/state/state-native/getters_gloas_test.go b/beacon-chain/state/state-native/getters_gloas_test.go
index b81ed68f0b..2ab426dd48 100644
--- a/beacon-chain/state/state-native/getters_gloas_test.go
+++ b/beacon-chain/state/state-native/getters_gloas_test.go
@@ -5,8 +5,10 @@ import (
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
+ fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -44,6 +46,103 @@ func TestLatestBlockHash(t *testing.T) {
})
}
+func TestLatestExecutionPayloadBid(t *testing.T) {
+ t.Run("returns error before gloas", func(t *testing.T) {
+ stIface, _ := util.DeterministicGenesisState(t, 1)
+ native, ok := stIface.(*state_native.BeaconState)
+ require.Equal(t, true, ok)
+
+ _, err := native.LatestExecutionPayloadBid()
+ require.ErrorContains(t, "is not supported", err)
+ })
+}
+
+func TestIsAttestationSameSlot(t *testing.T) {
+ buildStateWithBlockRoots := func(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
+ t.Helper()
+
+ cfg := params.BeaconConfig()
+ blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
+ for slot, root := range roots {
+ blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
+ }
+
+ stIface, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ Slot: stateSlot,
+ BlockRoots: blockRoots,
+ })
+ require.NoError(t, err)
+ return stIface.(*state_native.BeaconState)
+ }
+
+ rootA := bytes.Repeat([]byte{0xAA}, 32)
+ rootB := bytes.Repeat([]byte{0xBB}, 32)
+ rootC := bytes.Repeat([]byte{0xCC}, 32)
+
+ tests := []struct {
+ name string
+ stateSlot primitives.Slot
+ slot primitives.Slot
+ blockRoot []byte
+ roots map[primitives.Slot][]byte
+ want bool
+ }{
+ {
+ name: "slot zero always true",
+ stateSlot: 1,
+ slot: 0,
+ blockRoot: rootA,
+ roots: map[primitives.Slot][]byte{},
+ want: true,
+ },
+ {
+ name: "matching current different previous",
+ stateSlot: 6,
+ slot: 4,
+ blockRoot: rootA,
+ roots: map[primitives.Slot][]byte{
+ 4: rootA,
+ 3: rootB,
+ },
+ want: true,
+ },
+ {
+ name: "matching current same previous",
+ stateSlot: 6,
+ slot: 4,
+ blockRoot: rootA,
+ roots: map[primitives.Slot][]byte{
+ 4: rootA,
+ 3: rootA,
+ },
+ want: false,
+ },
+ {
+ name: "non matching current",
+ stateSlot: 6,
+ slot: 4,
+ blockRoot: rootC,
+ roots: map[primitives.Slot][]byte{
+ 4: rootA,
+ 3: rootB,
+ },
+ want: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ st := buildStateWithBlockRoots(t, tt.stateSlot, tt.roots)
+ var rootArr [32]byte
+ copy(rootArr[:], tt.blockRoot)
+
+ got, err := st.IsAttestationSameSlot(rootArr, tt.slot)
+ require.NoError(t, err)
+ require.Equal(t, tt.want, got)
+ })
+ }
+}
+
func TestBuilderPubkey(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
@@ -166,3 +265,208 @@ func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
_, err = st.BuilderPendingPayments()
require.ErrorContains(t, "BuilderPendingPayments", err)
}
+
+func TestWithdrawalsMatchPayloadExpected(t *testing.T) {
+ t.Run("returns error before gloas", func(t *testing.T) {
+ stIface, _ := util.DeterministicGenesisState(t, 1)
+ native, ok := stIface.(*state_native.BeaconState)
+ require.Equal(t, true, ok)
+
+ _, err := native.WithdrawalsMatchPayloadExpected(nil)
+ require.ErrorContains(t, "is not supported", err)
+ })
+
+ t.Run("returns true when roots match", func(t *testing.T) {
+ withdrawals := []*enginev1.Withdrawal{
+ {Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
+ }
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ PayloadExpectedWithdrawals: withdrawals,
+ })
+ require.NoError(t, err)
+
+ ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
+ require.NoError(t, err)
+ require.Equal(t, true, ok)
+ })
+
+ t.Run("returns false when roots do not match", func(t *testing.T) {
+ expected := []*enginev1.Withdrawal{
+ {Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
+ }
+ actual := []*enginev1.Withdrawal{
+ {Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 11},
+ }
+
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ PayloadExpectedWithdrawals: expected,
+ })
+ require.NoError(t, err)
+
+ ok, err := st.WithdrawalsMatchPayloadExpected(actual)
+ require.NoError(t, err)
+ require.Equal(t, false, ok)
+ })
+}
+
+func TestBuilder(t *testing.T) {
+ t.Run("nil builders returns nil", func(t *testing.T) {
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ Builders: nil,
+ })
+ require.NoError(t, err)
+
+ got, err := st.Builder(0)
+ require.NoError(t, err)
+ require.Equal(t, (*ethpb.Builder)(nil), got)
+ })
+
+ t.Run("out of bounds returns error", func(t *testing.T) {
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ Builders: []*ethpb.Builder{{}},
+ })
+ require.NoError(t, err)
+
+ _, err = st.Builder(1)
+ require.ErrorContains(t, "out of bounds", err)
+ })
+
+ t.Run("returns copy", func(t *testing.T) {
+ pubkey := bytes.Repeat([]byte{0xAA}, fieldparams.BLSPubkeyLength)
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ Builders: []*ethpb.Builder{
+ {
+ Pubkey: pubkey,
+ Balance: 42,
+ DepositEpoch: 3,
+ WithdrawableEpoch: 4,
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ got1, err := st.Builder(0)
+ require.NoError(t, err)
+ require.NotEqual(t, (*ethpb.Builder)(nil), got1)
+ require.Equal(t, primitives.Gwei(42), got1.Balance)
+ require.DeepEqual(t, pubkey, got1.Pubkey)
+
+ // Mutate returned builder; state should be unchanged.
+ got1.Pubkey[0] = 0xFF
+ got2, err := st.Builder(0)
+ require.NoError(t, err)
+ require.Equal(t, byte(0xAA), got2.Pubkey[0])
+ })
+}
+
+func TestBuilderIndexByPubkey(t *testing.T) {
+ t.Run("not found returns false", func(t *testing.T) {
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ Builders: []*ethpb.Builder{
+ {Pubkey: bytes.Repeat([]byte{0x11}, fieldparams.BLSPubkeyLength)},
+ },
+ })
+ require.NoError(t, err)
+
+ var pk [fieldparams.BLSPubkeyLength]byte
+ copy(pk[:], bytes.Repeat([]byte{0x22}, fieldparams.BLSPubkeyLength))
+ idx, ok := st.BuilderIndexByPubkey(pk)
+ require.Equal(t, false, ok)
+ require.Equal(t, primitives.BuilderIndex(0), idx)
+ })
+
+ t.Run("skips nil entries and finds match", func(t *testing.T) {
+ wantIdx := primitives.BuilderIndex(1)
+ wantPkBytes := bytes.Repeat([]byte{0xAB}, fieldparams.BLSPubkeyLength)
+
+ st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
+ Builders: []*ethpb.Builder{
+ nil,
+ {Pubkey: wantPkBytes},
+ },
+ })
+ require.NoError(t, err)
+
+ var pk [fieldparams.BLSPubkeyLength]byte
+ copy(pk[:], wantPkBytes)
+ idx, ok := st.BuilderIndexByPubkey(pk)
+ require.Equal(t, true, ok)
+ require.Equal(t, wantIdx, idx)
+ })
+}
+
+func TestBuilderPendingPayment(t *testing.T) {
+ t.Run("returns copy", func(t *testing.T) {
+ slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+ payments := make([]*ethpb.BuilderPendingPayment, 2*slotsPerEpoch)
+ target := uint64(slotsPerEpoch + 1)
+ payments[target] = ðpb.BuilderPendingPayment{Weight: 10}
+
+ st, err := state_native.InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
+ BuilderPendingPayments: payments,
+ })
+ require.NoError(t, err)
+
+ payment, err := st.BuilderPendingPayment(target)
+ require.NoError(t, err)
+
+ // mutate returned copy
+ payment.Weight = 99
+
+ original, err := st.BuilderPendingPayment(target)
+ require.NoError(t, err)
+ require.Equal(t, uint64(10), uint64(original.Weight))
+ })
+
+ t.Run("unsupported version", func(t *testing.T) {
+ stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
+ require.NoError(t, err)
+ st := stIface.(*state_native.BeaconState)
+
+ _, err = st.BuilderPendingPayment(0)
+ require.ErrorContains(t, "BuilderPendingPayment", err)
+ })
+
+ t.Run("out of range", func(t *testing.T) {
+ stIface, err := state_native.InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
+ BuilderPendingPayments: []*ethpb.BuilderPendingPayment{},
+ })
+ require.NoError(t, err)
+
+ _, err = stIface.BuilderPendingPayment(0)
+ require.ErrorContains(t, "out of range", err)
+ })
+}
+
+func TestExecutionPayloadAvailability(t *testing.T) {
+ t.Run("unsupported version", func(t *testing.T) {
+ stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
+ require.NoError(t, err)
+ st := stIface.(*state_native.BeaconState)
+
+ _, err = st.ExecutionPayloadAvailability(0)
+ require.ErrorContains(t, "ExecutionPayloadAvailability", err)
+ })
+
+ t.Run("reads expected bit", func(t *testing.T) {
+ // Ensure the backing slice is large enough.
+ availability := make([]byte, params.BeaconConfig().SlotsPerHistoricalRoot/8)
+
+ // Pick a slot and set its corresponding bit.
+ slot := primitives.Slot(9) // byteIndex=1, bitIndex=1
+ availability[1] = 0b00000010
+
+ stIface, err := state_native.InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
+ ExecutionPayloadAvailability: availability,
+ })
+ require.NoError(t, err)
+
+ bit, err := stIface.ExecutionPayloadAvailability(slot)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), bit)
+
+ otherBit, err := stIface.ExecutionPayloadAvailability(8)
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), otherBit)
+ })
+}
diff --git a/beacon-chain/state/state-native/setters_gloas.go b/beacon-chain/state/state-native/setters_gloas.go
index 1b0994bf7e..5ae36c7660 100644
--- a/beacon-chain/state/state-native/setters_gloas.go
+++ b/beacon-chain/state/state-native/setters_gloas.go
@@ -5,11 +5,14 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
+ fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
+ "github.com/OffchainLabs/prysm/v7/time/slots"
)
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -121,6 +124,41 @@ func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
return nil
}
+// QueueBuilderPayment implements the builder payment queuing logic for Gloas.
+// Spec v1.7.0-alpha.0 pseudocode:
+//
+//	payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
+//	amount = payment.withdrawal.amount
+//	if amount > 0:
+//	    state.builder_pending_withdrawals.append(payment.withdrawal)
+//	state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = BuilderPendingPayment()
+func (b *BeaconState) QueueBuilderPayment() error {
+ if b.version < version.Gloas {
+ return errNotSupported("QueueBuilderPayment", b.version)
+ }
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ slot := b.slot
+ slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+ paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
+ if uint64(paymentIndex) >= uint64(len(b.builderPendingPayments)) {
+ return fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentIndex, len(b.builderPendingPayments))
+ }
+
+ payment := b.builderPendingPayments[paymentIndex]
+ if payment != nil && payment.Withdrawal != nil && payment.Withdrawal.Amount > 0 {
+ b.builderPendingWithdrawals = append(b.builderPendingWithdrawals, ethpb.CopyBuilderPendingWithdrawal(payment.Withdrawal))
+ b.markFieldAsDirty(types.BuilderPendingWithdrawals)
+ }
+
+ b.builderPendingPayments[paymentIndex] = emptyBuilderPendingPayment
+ b.markFieldAsDirty(types.BuilderPendingPayments)
+ return nil
+}
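The pending-payments vector holds two epochs' worth of entries: the first SLOTS_PER_EPOCH slots track the previous epoch and the second half tracks the current epoch, which is why the index above is offset by SLOTS_PER_EPOCH. A worked example, assuming the mainnet SLOTS_PER_EPOCH of 32:

```go
package main

import "fmt"

func main() {
	const slotsPerEpoch = 32 // mainnet value, assumed for illustration
	slot := uint64(70)

	// builder_pending_payments has 2*SLOTS_PER_EPOCH entries; the current
	// epoch's payment for this slot lives in the second half of the vector.
	paymentIndex := slotsPerEpoch + slot%slotsPerEpoch
	fmt.Println(paymentIndex) // 38
}
```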
+
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
if b.version < version.Gloas {
@@ -161,3 +199,249 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
+
+// SetLatestBlockHash sets the latest execution block hash.
+func (b *BeaconState) SetLatestBlockHash(hash [32]byte) error {
+ if b.version < version.Gloas {
+ return errNotSupported("SetLatestBlockHash", b.version)
+ }
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ b.latestBlockHash = hash[:]
+ b.markFieldAsDirty(types.LatestBlockHash)
+ return nil
+}
+
+// SetExecutionPayloadAvailability sets the execution payload availability bit for a specific slot.
+func (b *BeaconState) SetExecutionPayloadAvailability(index primitives.Slot, available bool) error {
+ if b.version < version.Gloas {
+ return errNotSupported("SetExecutionPayloadAvailability", b.version)
+ }
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ bitIndex := index % params.BeaconConfig().SlotsPerHistoricalRoot
+ byteIndex := bitIndex / 8
+ bitPosition := bitIndex % 8
+
+ if uint64(byteIndex) >= uint64(len(b.executionPayloadAvailability)) {
+ return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", bitIndex, byteIndex, len(b.executionPayloadAvailability))
+ }
+
+ // Set or clear the bit
+ if available {
+ b.executionPayloadAvailability[byteIndex] |= 1 << bitPosition
+ } else {
+ b.executionPayloadAvailability[byteIndex] &^= 1 << bitPosition
+ }
+
+ b.markFieldAsDirty(types.ExecutionPayloadAvailability)
+ return nil
+}
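The setter relies on Go's `|=` to set a bit and the less common `&^=` (AND NOT) to clear it. A quick standalone check of both branches:

```go
package main

import "fmt"

func main() {
	var b byte = 0b0000_0010

	b |= 1 << 3 // set bit 3 -> 0b00001010
	fmt.Printf("%08b\n", b)

	b &^= 1 << 1 // AND NOT clears bit 1 -> 0b00001000
	fmt.Printf("%08b\n", b)
}
```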
+
+// IncreaseBuilderBalance increases the balance of the builder at the given index.
+func (b *BeaconState) IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error {
+ if b.version < version.Gloas {
+ return errNotSupported("IncreaseBuilderBalance", b.version)
+ }
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.builders == nil || uint64(index) >= uint64(len(b.builders)) {
+ return fmt.Errorf("builder index %d out of bounds", index)
+ }
+ if b.builders[index] == nil {
+ return fmt.Errorf("builder at index %d is nil", index)
+ }
+
+ builders := b.builders
+ if b.sharedFieldReferences[types.Builders].Refs() > 1 {
+ builders = make([]*ethpb.Builder, len(b.builders))
+ copy(builders, b.builders)
+ b.sharedFieldReferences[types.Builders].MinusRef()
+ b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
+ }
+
+ builder := ethpb.CopyBuilder(builders[index])
+ builder.Balance += primitives.Gwei(amount)
+ builders[index] = builder
+ b.builders = builders
+
+ b.markFieldAsDirty(types.Builders)
+ return nil
+}
+
+// AddBuilderFromDeposit creates or replaces a builder entry derived from a deposit.
+func (b *BeaconState) AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error {
+ if b.version < version.Gloas {
+ return errNotSupported("AddBuilderFromDeposit", b.version)
+ }
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ currentEpoch := slots.ToEpoch(b.slot)
+ index := b.builderInsertionIndex(currentEpoch)
+
+ builder := ðpb.Builder{
+ Pubkey: bytesutil.SafeCopyBytes(pubkey[:]),
+ Version: []byte{withdrawalCredentials[0]},
+ ExecutionAddress: bytesutil.SafeCopyBytes(withdrawalCredentials[12:]),
+ Balance: primitives.Gwei(amount),
+ DepositEpoch: currentEpoch,
+ WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
+ }
+
+ builders := b.builders
+ if b.sharedFieldReferences[types.Builders].Refs() > 1 {
+ builders = make([]*ethpb.Builder, len(b.builders))
+ copy(builders, b.builders)
+ b.sharedFieldReferences[types.Builders].MinusRef()
+ b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
+ }
+
+ if index < primitives.BuilderIndex(len(builders)) {
+ builders[index] = builder
+ } else {
+ gap := index - primitives.BuilderIndex(len(builders)) + 1
+ builders = append(builders, make([]*ethpb.Builder, gap)...)
+ builders[index] = builder
+ }
+ b.builders = builders
+
+ b.markFieldAsDirty(types.Builders)
+ return nil
+}
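When the insertion index lands past the end of the builders slice, the code grows the slice with nil entries before writing. The same append-with-gap idiom on a plain slice:

```go
package main

import "fmt"

func main() {
	s := []*int{new(int), new(int)} // len 2
	idx := 5

	if idx >= len(s) {
		gap := idx - len(s) + 1
		s = append(s, make([]*int, gap)...) // grow with nil entries so s[idx] is addressable
	}
	v := 9
	s[idx] = &v
	fmt.Println(len(s), s[2] == nil, *s[5]) // 6 true 9
}
```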
+
+func (b *BeaconState) builderInsertionIndex(currentEpoch primitives.Epoch) primitives.BuilderIndex {
+ for i, builder := range b.builders {
+ if builder == nil {
+ continue
+ }
+ if builder.WithdrawableEpoch <= currentEpoch && builder.Balance == 0 {
+ return primitives.BuilderIndex(i)
+ }
+ }
+ return primitives.BuilderIndex(len(b.builders))
+}
+
+// UpdatePendingPaymentWeight updates the builder pending payment weight based on attestation participation.
+//
+// This is a no-op for pre-Gloas forks.
+//
+// Spec v1.7.0-alpha pseudocode:
+//
+// if data.target.epoch == get_current_epoch(state):
+// current_epoch_target = True
+// epoch_participation = state.current_epoch_participation
+// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH]
+// else:
+// current_epoch_target = False
+// epoch_participation = state.previous_epoch_participation
+// payment = state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH]
+//
+// proposer_reward_numerator = 0
+// for index in get_attesting_indices(state, attestation):
+// will_set_new_flag = False
+// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
+// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
+// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
+// proposer_reward_numerator += get_base_reward(state, index) * weight
+// # [New in Gloas:EIP7732]
+// will_set_new_flag = True
+// if (
+// will_set_new_flag
+// and is_attestation_same_slot(state, data)
+// and payment.withdrawal.amount > 0
+// ):
+// payment.weight += state.validators[index].effective_balance
+// if current_epoch_target:
+// state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH] = payment
+// else:
+// state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH] = payment
+func (b *BeaconState) UpdatePendingPaymentWeight(att ethpb.Att, indices []uint64, participatedFlags map[uint8]bool) error {
+ var (
+ paymentSlot primitives.Slot
+ currentPayment *ethpb.BuilderPendingPayment
+ weight primitives.Gwei
+ )
+
+ early, err := func() (bool, error) {
+ b.lock.RLock()
+ defer b.lock.RUnlock()
+
+ if b.version < version.Gloas {
+ return true, nil
+ }
+
+ data := att.GetData()
+ var beaconBlockRoot [32]byte
+ copy(beaconBlockRoot[:], data.BeaconBlockRoot)
+ sameSlot, err := b.IsAttestationSameSlot(beaconBlockRoot, data.Slot)
+ if err != nil {
+ return false, err
+ }
+ if !sameSlot {
+ return true, nil
+ }
+
+ slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+ var epochParticipation []byte
+
+ if data.Target != nil && data.Target.Epoch == slots.ToEpoch(b.slot) {
+ paymentSlot = slotsPerEpoch + (data.Slot % slotsPerEpoch)
+ epochParticipation = b.currentEpochParticipation
+ } else {
+ paymentSlot = data.Slot % slotsPerEpoch
+ epochParticipation = b.previousEpochParticipation
+ }
+
+ if uint64(paymentSlot) >= uint64(len(b.builderPendingPayments)) {
+ return false, fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentSlot, len(b.builderPendingPayments))
+ }
+ currentPayment = b.builderPendingPayments[paymentSlot]
+ if currentPayment == nil || currentPayment.Withdrawal == nil || currentPayment.Withdrawal.Amount == 0 {
+ return true, nil
+ }
+
+ cfg := params.BeaconConfig()
+ flagIndices := []uint8{cfg.TimelySourceFlagIndex, cfg.TimelyTargetFlagIndex, cfg.TimelyHeadFlagIndex}
+ for _, idx := range indices {
+ if idx >= uint64(len(epochParticipation)) {
+ return false, fmt.Errorf("index %d exceeds participation length %d", idx, len(epochParticipation))
+ }
+ participation := epochParticipation[idx]
+ for _, f := range flagIndices {
+ if !participatedFlags[f] {
+ continue
+ }
+ if participation&(1<<f) == 0 {
+ // [IGNORE] envelope.slot >= compute_start_slot_at_epoch(store.finalized_checkpoint.epoch).
+ if err := v.VerifySlotAboveFinalized(finalized.Epoch); err != nil {
+ return pubsub.ValidationIgnore, err
+ }
+ // [REJECT] block passes validation.
+ if err := v.VerifyBlockRootValid(s.hasBadBlock); err != nil {
+ return pubsub.ValidationReject, err
+ }
+
+ // Let block be the block with envelope.beacon_block_root.
+ block, err := s.cfg.beaconDB.Block(ctx, root)
+ if err != nil {
+ return pubsub.ValidationIgnore, err
+ }
+ // [REJECT] block.slot equals envelope.slot.
+ if err := v.VerifySlotMatchesBlock(block.Block().Slot()); err != nil {
+ return pubsub.ValidationReject, err
+ }
+
+ // Let bid alias block.body.signed_execution_payload_bid.message
+ // (notice that this can be obtained from the state.latest_execution_payload_bid).
+ signedBid, err := block.Block().Body().SignedExecutionPayloadBid()
+ if err != nil {
+ return pubsub.ValidationIgnore, err
+ }
+ wrappedBid, err := blocks.WrappedROSignedExecutionPayloadBid(signedBid)
+ if err != nil {
+ return pubsub.ValidationIgnore, err
+ }
+ bid, err := wrappedBid.Bid()
+ if err != nil {
+ return pubsub.ValidationIgnore, err
+ }
+ // [REJECT] envelope.builder_index == bid.builder_index.
+ if err := v.VerifyBuilderValid(bid); err != nil {
+ return pubsub.ValidationReject, err
+ }
+ // [REJECT] payload.block_hash == bid.block_hash.
+ if err := v.VerifyPayloadHash(bid); err != nil {
+ return pubsub.ValidationReject, err
+ }
+
+ // For self-built payloads, the state is retrieved the same way we retrieve it for beacon block verification (reusing that optimization).
+ // For builder payloads, the read-only head state is used.
+ st, err := s.blockVerifyingState(ctx, block)
+ if err != nil {
+ return pubsub.ValidationIgnore, err
+ }
+
+ // [REJECT] signed_execution_payload_envelope.signature is valid with respect to the builder's public key.
+ if err := v.VerifySignature(st); err != nil {
+ return pubsub.ValidationReject, err
+ }
+ s.setSeenPayloadEnvelope(root, env.BuilderIndex())
+ return pubsub.ValidationAccept, nil
+}
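The validator follows the usual gossip convention when mapping verifier failures to pubsub results. A hedged sketch of that convention (the helper below is hypothetical, not part of the codebase):

```go
package sketch

import pubsub "github.com/libp2p/go-libp2p-pubsub"

// resultFor illustrates the convention used above: consensus-rule violations
// are rejected (penalizing the sender), while failures that may stem from
// missing local data are ignored so the message can still be relayed by
// better-informed peers.
func resultFor(consensusViolation bool) pubsub.ValidationResult {
	if consensusViolation {
		return pubsub.ValidationReject
	}
	return pubsub.ValidationIgnore
}
```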
+
+func (s *Service) executionPayloadEnvelopeSubscriber(ctx context.Context, msg proto.Message) error {
+ e, ok := msg.(*ethpb.SignedExecutionPayloadEnvelope)
+ if !ok {
+ return errWrongMessage
+ }
+ env, err := blocks.WrappedROSignedExecutionPayloadEnvelope(e)
+ if err != nil {
+ return errors.Wrap(err, "could not wrap signed execution payload envelope")
+ }
+ return s.cfg.chain.ReceiveExecutionPayloadEnvelope(ctx, env)
+}
+
+func (s *Service) hasSeenPayloadEnvelope(root [32]byte, builderIdx primitives.BuilderIndex) bool {
+ if s.seenPayloadEnvelopeCache == nil {
+ return false
+ }
+
+ b := append(bytesutil.Bytes32(uint64(builderIdx)), root[:]...)
+ _, seen := s.seenPayloadEnvelopeCache.Get(string(b))
+ return seen
+}
+
+func (s *Service) setSeenPayloadEnvelope(root [32]byte, builderIdx primitives.BuilderIndex) {
+ if s.seenPayloadEnvelopeCache == nil {
+ return
+ }
+
+ b := append(bytesutil.Bytes32(uint64(builderIdx)), root[:]...)
+ s.seenPayloadEnvelopeCache.Add(string(b), true)
+}
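The seen cache deduplicates per (builder, root) pair by concatenating the builder index and block root into a fixed 64-byte key. A sketch of the same construction in isolation (helper name assumed):

```go
package sketch

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
)

// seenKey mirrors the cache-key construction above: the builder index padded to
// 32 bytes via bytesutil.Bytes32, followed by the 32-byte beacon block root.
func seenKey(root [32]byte, builderIdx primitives.BuilderIndex) string {
	return string(append(bytesutil.Bytes32(uint64(builderIdx)), root[:]...))
}
```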
diff --git a/beacon-chain/sync/validate_execution_payload_envelope_test.go b/beacon-chain/sync/validate_execution_payload_envelope_test.go
new file mode 100644
index 0000000000..22f4e2a4d1
--- /dev/null
+++ b/beacon-chain/sync/validate_execution_payload_envelope_test.go
@@ -0,0 +1,291 @@
+package sync
+
+import (
+ "bytes"
+ "context"
+ "reflect"
+ "testing"
+ "time"
+
+ mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
+ dbtest "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
+ doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
+ p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
+ mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
+ lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+ "github.com/OffchainLabs/prysm/v7/testing/util"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ pb "github.com/libp2p/go-libp2p-pubsub/pb"
+ "github.com/pkg/errors"
+)
+
+func TestValidateExecutionPayloadEnvelope_InvalidTopic(t *testing.T) {
+ ctx := context.Background()
+ p := p2ptest.NewTestP2P(t)
+ s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
+
+ result, err := s.validateExecutionPayloadEnvelope(ctx, "", &pubsub.Message{
+ Message: &pb.Message{},
+ })
+ require.ErrorIs(t, p2p.ErrInvalidTopic, err)
+ require.Equal(t, result, pubsub.ValidationReject)
+}
+
+func TestValidateExecutionPayloadEnvelope_AlreadySeen(t *testing.T) {
+ ctx := context.Background()
+ s, msg, builderIdx, root := setupExecutionPayloadEnvelopeService(t, 1, 1)
+ s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(mockExecutionPayloadEnvelopeVerifier{})
+
+ s.setSeenPayloadEnvelope(root, builderIdx)
+ result, err := s.validateExecutionPayloadEnvelope(ctx, "", msg)
+ require.NoError(t, err)
+ require.Equal(t, result, pubsub.ValidationIgnore)
+}
+
+func TestValidateExecutionPayloadEnvelope_ErrorPathsWithMock(t *testing.T) {
+ ctx := context.Background()
+ tests := []struct {
+ name string
+ verifier mockExecutionPayloadEnvelopeVerifier
+ result pubsub.ValidationResult
+ }{
+ {
+ name: "block root not seen",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errBlockRootSeen: errors.New("not seen")},
+ result: pubsub.ValidationIgnore,
+ },
+ {
+ name: "slot below finalized",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errSlotAboveFinalized: errors.New("below finalized")},
+ result: pubsub.ValidationIgnore,
+ },
+ {
+ name: "block root invalid",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errBlockRootValid: errors.New("invalid block")},
+ result: pubsub.ValidationReject,
+ },
+ {
+ name: "slot mismatch",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errSlotMatchesBlock: errors.New("slot mismatch")},
+ result: pubsub.ValidationReject,
+ },
+ {
+ name: "builder mismatch",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errBuilderValid: errors.New("builder mismatch")},
+ result: pubsub.ValidationReject,
+ },
+ {
+ name: "payload hash mismatch",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errPayloadHash: errors.New("payload hash mismatch")},
+ result: pubsub.ValidationReject,
+ },
+ {
+ name: "signature invalid",
+ verifier: mockExecutionPayloadEnvelopeVerifier{errSignature: errors.New("signature invalid")},
+ result: pubsub.ValidationReject,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ s, msg, _, _ := setupExecutionPayloadEnvelopeService(t, 1, 1)
+ s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(tc.verifier)
+
+ result, err := s.validateExecutionPayloadEnvelope(ctx, "", msg)
+ require.NotNil(t, err)
+ require.Equal(t, result, tc.result)
+ })
+ }
+}
+
+func TestValidateExecutionPayloadEnvelope_HappyPath(t *testing.T) {
+ ctx := context.Background()
+ s, msg, builderIdx, root := setupExecutionPayloadEnvelopeService(t, 1, 1)
+ s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(mockExecutionPayloadEnvelopeVerifier{})
+
+ require.Equal(t, false, s.hasSeenPayloadEnvelope(root, builderIdx))
+ result, err := s.validateExecutionPayloadEnvelope(ctx, "", msg)
+ require.NoError(t, err)
+ require.Equal(t, result, pubsub.ValidationAccept)
+ require.Equal(t, true, s.hasSeenPayloadEnvelope(root, builderIdx))
+}
+
+func TestExecutionPayloadEnvelopeSubscriber_WrongMessage(t *testing.T) {
+ s := &Service{cfg: &config{}}
+ err := s.executionPayloadEnvelopeSubscriber(context.Background(), ðpb.BeaconBlock{})
+ require.ErrorIs(t, errWrongMessage, err)
+}
+
+func TestExecutionPayloadEnvelopeSubscriber_HappyPath(t *testing.T) {
+ s := &Service{cfg: &config{chain: &mock.ChainService{}}}
+ root := [32]byte{0x01}
+ blockHash := [32]byte{0x02}
+ env := testSignedExecutionPayloadEnvelope(t, 1, 2, root, blockHash)
+
+ err := s.executionPayloadEnvelopeSubscriber(context.Background(), env)
+ require.NoError(t, err)
+}
+
+type mockExecutionPayloadEnvelopeVerifier struct {
+ errBlockRootSeen error
+ errBlockRootValid error
+ errSlotAboveFinalized error
+ errSlotMatchesBlock error
+ errBuilderValid error
+ errPayloadHash error
+ errSignature error
+}
+
+var _ verification.ExecutionPayloadEnvelopeVerifier = &mockExecutionPayloadEnvelopeVerifier{}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifyBlockRootSeen(_ func([32]byte) bool) error {
+ return m.errBlockRootSeen
+}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifyBlockRootValid(_ func([32]byte) bool) error {
+ return m.errBlockRootValid
+}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifySlotAboveFinalized(_ primitives.Epoch) error {
+ return m.errSlotAboveFinalized
+}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifySlotMatchesBlock(_ primitives.Slot) error {
+ return m.errSlotMatchesBlock
+}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifyBuilderValid(_ interfaces.ROExecutionPayloadBid) error {
+ return m.errBuilderValid
+}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifyPayloadHash(_ interfaces.ROExecutionPayloadBid) error {
+ return m.errPayloadHash
+}
+
+func (m *mockExecutionPayloadEnvelopeVerifier) VerifySignature(_ state.ReadOnlyBeaconState) error {
+ return m.errSignature
+}
+
+func (*mockExecutionPayloadEnvelopeVerifier) SatisfyRequirement(_ verification.Requirement) {}
+
+func testNewExecutionPayloadEnvelopeVerifier(m mockExecutionPayloadEnvelopeVerifier) verification.NewExecutionPayloadEnvelopeVerifier {
+ return func(_ interfaces.ROSignedExecutionPayloadEnvelope, _ []verification.Requirement) verification.ExecutionPayloadEnvelopeVerifier {
+ clone := m
+ return &clone
+ }
+}
+
+func setupExecutionPayloadEnvelopeService(t *testing.T, envelopeSlot, blockSlot primitives.Slot) (*Service, *pubsub.Message, primitives.BuilderIndex, [32]byte) {
+ t.Helper()
+
+ ctx := context.Background()
+ db := dbtest.SetupDB(t)
+ p := p2ptest.NewTestP2P(t)
+ chainService := &mock.ChainService{
+ Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
+ FinalizedCheckPoint: ðpb.Checkpoint{},
+ DB: db,
+ }
+ stateGen := stategen.New(db, doublylinkedtree.New())
+ s := &Service{
+ seenPayloadEnvelopeCache: lruwrpr.New(10),
+ cfg: &config{
+ p2p: p,
+ initialSync: &mockSync.Sync{},
+ chain: chainService,
+ beaconDB: db,
+ stateGen: stateGen,
+ clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
+ },
+ }
+
+ bid := util.GenerateTestSignedExecutionPayloadBid(blockSlot)
+ sb := util.NewBeaconBlockGloas()
+ sb.Block.Slot = blockSlot
+ sb.Block.Body.SignedExecutionPayloadBid = bid
+ signedBlock, err := blocks.NewSignedBeaconBlock(sb)
+ require.NoError(t, err)
+ root, err := signedBlock.Block().HashTreeRoot()
+ require.NoError(t, err)
+ require.NoError(t, db.SaveBlock(ctx, signedBlock))
+
+ st, err := util.NewBeaconStateFulu()
+ require.NoError(t, err)
+ require.NoError(t, db.SaveState(ctx, st, root))
+
+ blockHash := bytesutil.ToBytes32(bid.Message.BlockHash)
+ env := testSignedExecutionPayloadEnvelope(t, envelopeSlot, primitives.BuilderIndex(bid.Message.BuilderIndex), root, blockHash)
+ msg := envelopeToPubsub(t, s, p, env)
+
+ return s, msg, primitives.BuilderIndex(bid.Message.BuilderIndex), root
+}
+
+func envelopeToPubsub(t *testing.T, s *Service, p p2p.P2P, env *ethpb.SignedExecutionPayloadEnvelope) *pubsub.Message {
+ t.Helper()
+
+ buf := new(bytes.Buffer)
+ _, err := p.Encoding().EncodeGossip(buf, env)
+ require.NoError(t, err)
+
+ topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.SignedExecutionPayloadEnvelope]()]
+ digest, err := s.currentForkDigest()
+ require.NoError(t, err)
+ topic = s.addDigestToTopic(topic, digest)
+
+ return &pubsub.Message{
+ Message: &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ },
+ }
+}
+
+func testSignedExecutionPayloadEnvelope(t *testing.T, slot primitives.Slot, builderIdx primitives.BuilderIndex, root, blockHash [32]byte) *ethpb.SignedExecutionPayloadEnvelope {
+ t.Helper()
+
+ payload := &enginev1.ExecutionPayloadDeneb{
+ ParentHash: bytes.Repeat([]byte{0x01}, 32),
+ FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
+ StateRoot: bytes.Repeat([]byte{0x03}, 32),
+ ReceiptsRoot: bytes.Repeat([]byte{0x04}, 32),
+ LogsBloom: bytes.Repeat([]byte{0x05}, 256),
+ PrevRandao: bytes.Repeat([]byte{0x06}, 32),
+ BlockNumber: 1,
+ GasLimit: 2,
+ GasUsed: 3,
+ Timestamp: 4,
+ BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
+ BlockHash: blockHash[:],
+ Transactions: [][]byte{},
+ Withdrawals: []*enginev1.Withdrawal{},
+ BlobGasUsed: 0,
+ ExcessBlobGas: 0,
+ }
+
+ return ðpb.SignedExecutionPayloadEnvelope{
+ Message: ðpb.ExecutionPayloadEnvelope{
+ Payload: payload,
+ ExecutionRequests: &enginev1.ExecutionRequests{
+ Deposits: []*enginev1.DepositRequest{},
+ },
+ BuilderIndex: builderIdx,
+ BeaconBlockRoot: root[:],
+ Slot: slot,
+ StateRoot: bytes.Repeat([]byte{0xBB}, 32),
+ },
+ Signature: bytes.Repeat([]byte{0xAA}, 96),
+ }
+}
diff --git a/beacon-chain/verification/BUILD.bazel b/beacon-chain/verification/BUILD.bazel
index 6c22459602..49b12921f1 100644
--- a/beacon-chain/verification/BUILD.bazel
+++ b/beacon-chain/verification/BUILD.bazel
@@ -8,6 +8,7 @@ go_library(
"cache.go",
"data_column.go",
"error.go",
+ "execution_payload_envelope.go",
"fake.go",
"filesystem.go",
"initializer.go",
@@ -36,6 +37,7 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
+ "//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attestation:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
@@ -60,6 +62,7 @@ go_test(
"blob_test.go",
"cache_test.go",
"data_column_test.go",
+ "execution_payload_envelope_test.go",
"filesystem_test.go",
"initializer_test.go",
"payload_attestation_test.go",
@@ -80,11 +83,13 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
+ "//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attestation:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
+ "//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/interop:go_default_library",
"//testing/require:go_default_library",
diff --git a/beacon-chain/verification/execution_payload_envelope.go b/beacon-chain/verification/execution_payload_envelope.go
new file mode 100644
index 0000000000..ddeda06a97
--- /dev/null
+++ b/beacon-chain/verification/execution_payload_envelope.go
@@ -0,0 +1,229 @@
+package verification
+
+import (
+ "fmt"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/crypto/bls"
+ "github.com/OffchainLabs/prysm/v7/time/slots"
+ "github.com/pkg/errors"
+)
+
+// ExecutionPayloadEnvelopeVerifier defines the verification methods for an ROSignedExecutionPayloadEnvelope.
+type ExecutionPayloadEnvelopeVerifier interface {
+ VerifyBlockRootSeen(func([32]byte) bool) error
+ VerifyBlockRootValid(func([32]byte) bool) error
+ VerifySlotAboveFinalized(primitives.Epoch) error
+ VerifySlotMatchesBlock(primitives.Slot) error
+ VerifyBuilderValid(interfaces.ROExecutionPayloadBid) error
+ VerifyPayloadHash(interfaces.ROExecutionPayloadBid) error
+ VerifySignature(state.ReadOnlyBeaconState) error
+ SatisfyRequirement(Requirement)
+}
+
+// NewExecutionPayloadEnvelopeVerifier is a function signature that can be used by code that needs to
+// mock Initializer.NewPayloadEnvelopeVerifier without complex setup.
+type NewExecutionPayloadEnvelopeVerifier func(e interfaces.ROSignedExecutionPayloadEnvelope, reqs []Requirement) ExecutionPayloadEnvelopeVerifier
+
+// ExecutionPayloadEnvelopeGossipRequirements defines the list of requirements for gossip
+// execution payload envelopes.
+var ExecutionPayloadEnvelopeGossipRequirements = []Requirement{
+ RequireBlockRootSeen,
+ RequireBlockRootValid,
+ RequireEnvelopeSlotAboveFinalized,
+ RequireEnvelopeSlotMatchesBlock,
+ RequireBuilderValid,
+ RequirePayloadHashValid,
+ RequireBuilderSignatureValid,
+}
+
+// GossipExecutionPayloadEnvelopeRequirements is a requirement list for gossip execution payload envelopes.
+var GossipExecutionPayloadEnvelopeRequirements = requirementList(ExecutionPayloadEnvelopeGossipRequirements)
+
+var (
+ ErrEnvelopeBlockRootNotSeen = errors.New("block root not seen")
+ ErrEnvelopeBlockRootInvalid = errors.New("block root invalid")
+ ErrEnvelopeSlotBeforeFinalized = errors.New("envelope slot is before finalized checkpoint")
+ ErrEnvelopeSlotMismatch = errors.New("envelope slot does not match block slot")
+ ErrIncorrectEnvelopeBuilder = errors.New("builder index does not match committed header")
+ ErrIncorrectEnvelopeBlockHash = errors.New("block hash does not match committed header")
+)
+
+var _ ExecutionPayloadEnvelopeVerifier = &EnvelopeVerifier{}
+
+// EnvelopeVerifier verifies a signed execution payload envelope against a set of requirements, recording the result of each check.
+type EnvelopeVerifier struct {
+ results *results
+ e interfaces.ROSignedExecutionPayloadEnvelope
+}
+
+// VerifyBlockRootSeen verifies if the block root has been seen before.
+func (v *EnvelopeVerifier) VerifyBlockRootSeen(blockRootSeen func([32]byte) bool) (err error) {
+ defer v.record(RequireBlockRootSeen, &err)
+ env, err := v.e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ if blockRootSeen != nil && blockRootSeen(env.BeaconBlockRoot()) {
+ return nil
+ }
+ return fmt.Errorf("%w: root=%#x slot=%d builder=%d", ErrEnvelopeBlockRootNotSeen, env.BeaconBlockRoot(), env.Slot(), env.BuilderIndex())
+}
+
+// VerifyBlockRootValid verifies if the block root is valid.
+func (v *EnvelopeVerifier) VerifyBlockRootValid(badBlock func([32]byte) bool) (err error) {
+ defer v.record(RequireBlockRootValid, &err)
+ env, err := v.e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ if badBlock != nil && badBlock(env.BeaconBlockRoot()) {
+ return fmt.Errorf("%w: root=%#x slot=%d builder=%d", ErrEnvelopeBlockRootInvalid, env.BeaconBlockRoot(), env.Slot(), env.BuilderIndex())
+ }
+ return nil
+}
+
+// VerifySlotAboveFinalized ensures the envelope slot is not before the latest finalized epoch start.
+func (v *EnvelopeVerifier) VerifySlotAboveFinalized(finalizedEpoch primitives.Epoch) (err error) {
+ defer v.record(RequireEnvelopeSlotAboveFinalized, &err)
+ env, err := v.e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ startSlot, err := slots.EpochStart(finalizedEpoch)
+ if err != nil {
+ return errors.Wrapf(ErrEnvelopeSlotBeforeFinalized, "could not compute start slot for finalized epoch %d: %s", finalizedEpoch, err.Error())
+ }
+ if env.Slot() < startSlot {
+ return fmt.Errorf("%w: slot=%d start=%d", ErrEnvelopeSlotBeforeFinalized, env.Slot(), startSlot)
+ }
+ return nil
+}
+
+// VerifySlotMatchesBlock ensures the envelope slot matches the block slot.
+func (v *EnvelopeVerifier) VerifySlotMatchesBlock(blockSlot primitives.Slot) (err error) {
+ defer v.record(RequireEnvelopeSlotMatchesBlock, &err)
+ env, err := v.e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ if env.Slot() != blockSlot {
+ return fmt.Errorf("%w: envelope=%d block=%d", ErrEnvelopeSlotMismatch, env.Slot(), blockSlot)
+ }
+ return nil
+}
+
+// VerifyBuilderValid checks that the builder index matches the one in the bid.
+func (v *EnvelopeVerifier) VerifyBuilderValid(bid interfaces.ROExecutionPayloadBid) (err error) {
+ defer v.record(RequireBuilderValid, &err)
+ env, err := v.e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ if bid.BuilderIndex() != env.BuilderIndex() {
+ return fmt.Errorf("%w: envelope=%d bid=%d", ErrIncorrectEnvelopeBuilder, env.BuilderIndex(), bid.BuilderIndex())
+ }
+ return nil
+}
+
+// VerifyPayloadHash checks that the payload blockhash matches the one in the bid.
+func (v *EnvelopeVerifier) VerifyPayloadHash(bid interfaces.ROExecutionPayloadBid) (err error) {
+ defer v.record(RequirePayloadHashValid, &err)
+ env, err := v.e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ if env.IsBlinded() {
+ return nil
+ }
+ payload, err := env.Execution()
+ if err != nil {
+ return errors.Wrap(err, "failed to get payload execution")
+ }
+ if bid.BlockHash() != [32]byte(payload.BlockHash()) {
+ return fmt.Errorf("%w: payload=%#x bid=%#x", ErrIncorrectEnvelopeBlockHash, payload.BlockHash(), bid.BlockHash())
+ }
+ return nil
+}
+
+// VerifySignature verifies the signature of the execution payload envelope.
+func (v *EnvelopeVerifier) VerifySignature(st state.ReadOnlyBeaconState) (err error) {
+ defer v.record(RequireBuilderSignatureValid, &err)
+
+ err = validatePayloadEnvelopeSignature(st, v.e)
+ if err != nil {
+ env, envErr := v.e.Envelope()
+ if envErr != nil {
+ return errors.Wrap(err, "signature validation failed")
+ }
+ return errors.Wrapf(err, "signature validation failed: root=%#x slot=%d builder=%d", env.BeaconBlockRoot(), env.Slot(), env.BuilderIndex())
+ }
+ return nil
+}
+
+// SatisfyRequirement allows the caller to manually mark a requirement as satisfied.
+func (v *EnvelopeVerifier) SatisfyRequirement(req Requirement) {
+ v.record(req, nil)
+}
+
+// record records the result of a requirement verification.
+func (v *EnvelopeVerifier) record(req Requirement, err *error) {
+ if err == nil || *err == nil {
+ v.results.record(req, nil)
+ return
+ }
+
+ v.results.record(req, *err)
+}
+
+// validatePayloadEnvelopeSignature verifies the signature of a signed execution payload envelope
+func validatePayloadEnvelopeSignature(st state.ReadOnlyBeaconState, e interfaces.ROSignedExecutionPayloadEnvelope) error {
+ env, err := e.Envelope()
+ if err != nil {
+ return errors.Wrap(err, "failed to get envelope")
+ }
+ var pubkey []byte
+ if env.BuilderIndex() == params.BeaconConfig().BuilderIndexSelfBuild {
+ header := st.LatestBlockHeader()
+ if header == nil {
+ return errors.New("latest block header is nil")
+ }
+ val, err := st.ValidatorAtIndex(primitives.ValidatorIndex(header.ProposerIndex))
+ if err != nil {
+ return errors.Wrap(err, "failed to get proposer validator")
+ }
+ pubkey = val.PublicKey
+ } else {
+ builderPubkey, err := st.BuilderPubkey(env.BuilderIndex())
+ if err != nil {
+ return errors.Wrap(err, "failed to get builder pubkey")
+ }
+ pubkey = builderPubkey[:]
+ }
+ pub, err := bls.PublicKeyFromBytes(pubkey)
+ if err != nil {
+ return errors.Wrap(err, "invalid public key")
+ }
+ s := e.Signature()
+ sig, err := bls.SignatureFromBytes(s[:])
+ if err != nil {
+ return errors.Wrap(err, "invalid signature format")
+ }
+ currentEpoch := slots.ToEpoch(st.Slot())
+ domain, err := signing.Domain(st.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
+ if err != nil {
+ return errors.Wrap(err, "failed to compute signing domain")
+ }
+ root, err := e.SigningRoot(domain)
+ if err != nil {
+ return errors.Wrap(err, "failed to compute signing root")
+ }
+ if !sig.Verify(pub, root[:]) {
+ return signing.ErrSigFailedToVerify
+ }
+ return nil
+}
diff --git a/beacon-chain/verification/execution_payload_envelope_test.go b/beacon-chain/verification/execution_payload_envelope_test.go
new file mode 100644
index 0000000000..914b2b6648
--- /dev/null
+++ b/beacon-chain/verification/execution_payload_envelope_test.go
@@ -0,0 +1,258 @@
+package verification
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+ "github.com/OffchainLabs/prysm/v7/crypto/bls"
+ "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
+ enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+ "github.com/OffchainLabs/prysm/v7/testing/util"
+ "github.com/OffchainLabs/prysm/v7/time/slots"
+)
+
+func TestEnvelopeVerifier_VerifySlotAboveFinalized(t *testing.T) {
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, 1, 1, root, blockHash)
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ verifier := &EnvelopeVerifier{results: newResults(RequireEnvelopeSlotAboveFinalized), e: wrapped}
+ require.ErrorIs(t, verifier.VerifySlotAboveFinalized(1), ErrEnvelopeSlotBeforeFinalized)
+
+ verifier = &EnvelopeVerifier{results: newResults(RequireEnvelopeSlotAboveFinalized), e: wrapped}
+ require.NoError(t, verifier.VerifySlotAboveFinalized(0))
+}
+
+func TestEnvelopeVerifier_VerifySlotMatchesBlock(t *testing.T) {
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, 2, 1, root, blockHash)
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ verifier := &EnvelopeVerifier{results: newResults(RequireEnvelopeSlotMatchesBlock), e: wrapped}
+ require.ErrorIs(t, verifier.VerifySlotMatchesBlock(3), ErrEnvelopeSlotMismatch)
+
+ verifier = &EnvelopeVerifier{results: newResults(RequireEnvelopeSlotMatchesBlock), e: wrapped}
+ require.NoError(t, verifier.VerifySlotMatchesBlock(2))
+}
+
+func TestEnvelopeVerifier_VerifyBlockRootSeen(t *testing.T) {
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, 1, 1, root, blockHash)
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ verifier := &EnvelopeVerifier{results: newResults(RequireBlockRootSeen), e: wrapped}
+ require.ErrorIs(t, verifier.VerifyBlockRootSeen(func([32]byte) bool { return false }), ErrEnvelopeBlockRootNotSeen)
+
+ verifier = &EnvelopeVerifier{results: newResults(RequireBlockRootSeen), e: wrapped}
+ require.NoError(t, verifier.VerifyBlockRootSeen(func([32]byte) bool { return true }))
+}
+
+func TestEnvelopeVerifier_VerifyBlockRootValid(t *testing.T) {
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, 1, 1, root, blockHash)
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ verifier := &EnvelopeVerifier{results: newResults(RequireBlockRootValid), e: wrapped}
+ require.ErrorIs(t, verifier.VerifyBlockRootValid(func([32]byte) bool { return true }), ErrEnvelopeBlockRootInvalid)
+
+ verifier = &EnvelopeVerifier{results: newResults(RequireBlockRootValid), e: wrapped}
+ require.NoError(t, verifier.VerifyBlockRootValid(func([32]byte) bool { return false }))
+}
+
+func TestEnvelopeVerifier_VerifyBuilderValid(t *testing.T) {
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, 1, 1, root, blockHash)
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ badBid := testExecutionPayloadBid(t, 1, 2, blockHash)
+ verifier := &EnvelopeVerifier{results: newResults(RequireBuilderValid), e: wrapped}
+ require.ErrorIs(t, verifier.VerifyBuilderValid(badBid), ErrIncorrectEnvelopeBuilder)
+
+ okBid := testExecutionPayloadBid(t, 1, 1, blockHash)
+ verifier = &EnvelopeVerifier{results: newResults(RequireBuilderValid), e: wrapped}
+ require.NoError(t, verifier.VerifyBuilderValid(okBid))
+}
+
+func TestEnvelopeVerifier_VerifyPayloadHash(t *testing.T) {
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, 1, 1, root, blockHash)
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ badHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xCC}, 32))
+ badBid := testExecutionPayloadBid(t, 1, 1, badHash)
+ verifier := &EnvelopeVerifier{results: newResults(RequirePayloadHashValid), e: wrapped}
+ require.ErrorIs(t, verifier.VerifyPayloadHash(badBid), ErrIncorrectEnvelopeBlockHash)
+
+ okBid := testExecutionPayloadBid(t, 1, 1, blockHash)
+ verifier = &EnvelopeVerifier{results: newResults(RequirePayloadHashValid), e: wrapped}
+ require.NoError(t, verifier.VerifyPayloadHash(okBid))
+}
+
+func TestEnvelopeVerifier_VerifySignature_Builder(t *testing.T) {
+ slot := primitives.Slot(1)
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, slot, 0, root, blockHash)
+
+ sk, err := bls.RandKey()
+ require.NoError(t, err)
+ builderPubkey := sk.PublicKey().Marshal()
+
+ st := newGloasState(t, slot, nil, nil, []*ethpb.Builder{{Pubkey: builderPubkey}})
+
+ sig := signEnvelope(t, sk, env.Message, st.Fork(), st.GenesisValidatorsRoot(), slot)
+ env.Signature = sig[:]
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ verifier := &EnvelopeVerifier{results: newResults(RequireBuilderSignatureValid), e: wrapped}
+ require.NoError(t, verifier.VerifySignature(st))
+
+ sk2, err := bls.RandKey()
+ require.NoError(t, err)
+ badSig := signEnvelope(t, sk2, env.Message, st.Fork(), st.GenesisValidatorsRoot(), slot)
+ env.Signature = badSig[:]
+ wrapped, err = blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+ verifier = &EnvelopeVerifier{results: newResults(RequireBuilderSignatureValid), e: wrapped}
+ require.ErrorIs(t, verifier.VerifySignature(st), signing.ErrSigFailedToVerify)
+}
+
+func TestEnvelopeVerifier_VerifySignature_SelfBuild(t *testing.T) {
+ slot := primitives.Slot(2)
+ root := bytesutil.ToBytes32(bytes.Repeat([]byte{0xAA}, 32))
+ blockHash := bytesutil.ToBytes32(bytes.Repeat([]byte{0xBB}, 32))
+ env := testSignedExecutionPayloadEnvelope(t, slot, params.BeaconConfig().BuilderIndexSelfBuild, root, blockHash)
+
+ sk, err := bls.RandKey()
+ require.NoError(t, err)
+ validatorPubkey := sk.PublicKey().Marshal()
+
+ validators := []*ethpb.Validator{{PublicKey: validatorPubkey}}
+ balances := []uint64{0}
+ st := newGloasState(t, slot, validators, balances, nil)
+
+ sig := signEnvelope(t, sk, env.Message, st.Fork(), st.GenesisValidatorsRoot(), slot)
+ env.Signature = sig[:]
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(env)
+ require.NoError(t, err)
+
+ verifier := &EnvelopeVerifier{results: newResults(RequireBuilderSignatureValid), e: wrapped}
+ require.NoError(t, verifier.VerifySignature(st))
+}
+
+func testSignedExecutionPayloadEnvelope(t *testing.T, slot primitives.Slot, builderIdx primitives.BuilderIndex, root, blockHash [32]byte) *ethpb.SignedExecutionPayloadEnvelope {
+ t.Helper()
+
+ payload := &enginev1.ExecutionPayloadDeneb{
+ ParentHash: bytes.Repeat([]byte{0x01}, 32),
+ FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
+ StateRoot: bytes.Repeat([]byte{0x03}, 32),
+ ReceiptsRoot: bytes.Repeat([]byte{0x04}, 32),
+ LogsBloom: bytes.Repeat([]byte{0x05}, 256),
+ PrevRandao: bytes.Repeat([]byte{0x06}, 32),
+ BlockNumber: 1,
+ GasLimit: 2,
+ GasUsed: 3,
+ Timestamp: 4,
+ BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
+ BlockHash: blockHash[:],
+ Transactions: [][]byte{},
+ Withdrawals: []*enginev1.Withdrawal{},
+ BlobGasUsed: 0,
+ ExcessBlobGas: 0,
+ }
+
+ return ðpb.SignedExecutionPayloadEnvelope{
+ Message: ðpb.ExecutionPayloadEnvelope{
+ Payload: payload,
+ ExecutionRequests: &enginev1.ExecutionRequests{
+ Deposits: []*enginev1.DepositRequest{},
+ },
+ BuilderIndex: builderIdx,
+ BeaconBlockRoot: root[:],
+ Slot: slot,
+ StateRoot: bytes.Repeat([]byte{0xBB}, 32),
+ },
+ Signature: bytes.Repeat([]byte{0xCC}, 96),
+ }
+}
+
+func testExecutionPayloadBid(t *testing.T, slot primitives.Slot, builderIdx primitives.BuilderIndex, blockHash [32]byte) interfaces.ROExecutionPayloadBid {
+ t.Helper()
+
+ signed := util.GenerateTestSignedExecutionPayloadBid(slot)
+ signed.Message.BuilderIndex = builderIdx
+ copy(signed.Message.BlockHash, blockHash[:])
+
+ wrapped, err := blocks.WrappedROSignedExecutionPayloadBid(signed)
+ require.NoError(t, err)
+ bid, err := wrapped.Bid()
+ require.NoError(t, err)
+ return bid
+}
+
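+// newGloasState builds a minimal Gloas beacon state at the given slot. Validators, balances, and builders are only
+// set when non-nil, and the latest block header's proposer index is pinned to 0.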
+func newGloasState(
+ t *testing.T,
+ slot primitives.Slot,
+ validators []*ethpb.Validator,
+ balances []uint64,
+ builders []*ethpb.Builder,
+) state.BeaconState {
+ t.Helper()
+
+ genesisRoot := bytes.Repeat([]byte{0x11}, 32)
+ st, err := util.NewBeaconStateGloas(func(s *ethpb.BeaconStateGloas) error {
+ s.Slot = slot
+ s.GenesisValidatorsRoot = genesisRoot
+ if validators != nil {
+ s.Validators = validators
+ }
+ if balances != nil {
+ s.Balances = balances
+ }
+ if s.LatestBlockHeader != nil {
+ s.LatestBlockHeader.ProposerIndex = 0
+ }
+ if builders != nil {
+ s.Builders = builders
+ }
+ return nil
+ })
+ require.NoError(t, err)
+ return st
+}
+
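+// signEnvelope computes the envelope's signing root under the DomainBeaconBuilder domain for the epoch containing
+// slot and returns the resulting BLS signature as a fixed-size array.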
+func signEnvelope(t *testing.T, sk bls.SecretKey, env *ethpb.ExecutionPayloadEnvelope, fork *ethpb.Fork, genesisRoot []byte, slot primitives.Slot) [96]byte {
+ t.Helper()
+
+ epoch := slots.ToEpoch(slot)
+ domain, err := signing.Domain(fork, epoch, params.BeaconConfig().DomainBeaconBuilder, genesisRoot)
+ require.NoError(t, err)
+ root, err := signing.ComputeSigningRoot(env, domain)
+ require.NoError(t, err)
+ sig := sk.Sign(root[:]).Marshal()
+ var out [96]byte
+ copy(out[:], sig)
+ return out
+}
diff --git a/beacon-chain/verification/initializer.go b/beacon-chain/verification/initializer.go
index 1a13a0c44c..ed53c0f51b 100644
--- a/beacon-chain/verification/initializer.go
+++ b/beacon-chain/verification/initializer.go
@@ -12,6 +12,7 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -97,6 +98,14 @@ func (ini *Initializer) NewPayloadAttestationMsgVerifier(pa payloadattestation.R
}
}
+// NewPayloadEnvelopeVerifier creates an EnvelopeVerifier for a single signed execution payload envelope with the given set of requirements.
+func (ini *Initializer) NewPayloadEnvelopeVerifier(ee interfaces.ROSignedExecutionPayloadEnvelope, reqs []Requirement) *EnvelopeVerifier {
+ return &EnvelopeVerifier{
+ results: newResults(reqs...),
+ e: ee,
+ }
+}
+
// InitializerWaiter provides an Initializer once all dependent resources are ready
// via the WaitForInitializer method.
type InitializerWaiter struct {
diff --git a/beacon-chain/verification/requirements.go b/beacon-chain/verification/requirements.go
index 69f55b1779..eea063322e 100644
--- a/beacon-chain/verification/requirements.go
+++ b/beacon-chain/verification/requirements.go
@@ -24,4 +24,11 @@ const (
RequireBlockRootSeen
RequireBlockRootValid
RequireSignatureValid
+
+ // Execution payload envelope specific.
+ RequireBuilderValid
+ RequirePayloadHashValid
+ RequireEnvelopeSlotAboveFinalized
+ RequireEnvelopeSlotMatchesBlock
+ RequireBuilderSignatureValid
)
diff --git a/beacon-chain/verification/result.go b/beacon-chain/verification/result.go
index 8c5998bff9..e2e0a85adb 100644
--- a/beacon-chain/verification/result.go
+++ b/beacon-chain/verification/result.go
@@ -45,6 +45,16 @@ func (r Requirement) String() string {
return "RequireBlockRootValid"
case RequireSignatureValid:
return "RequireSignatureValid"
+ case RequireBuilderValid:
+ return "RequireBuilderValid"
+ case RequirePayloadHashValid:
+ return "RequirePayloadHashValid"
+ case RequireEnvelopeSlotAboveFinalized:
+ return "RequireEnvelopeSlotAboveFinalized"
+ case RequireEnvelopeSlotMatchesBlock:
+ return "RequireEnvelopeSlotMatchesBlock"
+ case RequireBuilderSignatureValid:
+ return "RequireBuilderSignatureValid"
default:
return unknownRequirementName
}
diff --git a/changelog/codex_add-gloas-execution-payload-envelope.md b/changelog/codex_add-gloas-execution-payload-envelope.md
new file mode 100644
index 0000000000..db30b8c71b
--- /dev/null
+++ b/changelog/codex_add-gloas-execution-payload-envelope.md
@@ -0,0 +1,3 @@
+### Added
+
+- Add Gloas execution payload envelope gossip validation
diff --git a/changelog/potuz_forkchoice_unused_highestblockdelay.md b/changelog/potuz_forkchoice_unused_highestblockdelay.md
deleted file mode 100644
index b7df2a51a5..0000000000
--- a/changelog/potuz_forkchoice_unused_highestblockdelay.md
+++ /dev/null
@@ -1,2 +0,0 @@
-### Ignored
-- Remove unused `HighestBlockDelay` method in forkchoice.
diff --git a/changelog/potuz_gloas_forkchoice_1.md b/changelog/potuz_gloas_forkchoice_1.md
new file mode 100644
index 0000000000..d53b173063
--- /dev/null
+++ b/changelog/potuz_gloas_forkchoice_1.md
@@ -0,0 +1,2 @@
+### Added
+- Adapted forkchoice to a future Gloas-compatible type.
diff --git a/changelog/satushh-graffiti-impl.md b/changelog/satushh-graffiti-impl.md
new file mode 100644
index 0000000000..b2f80a029c
--- /dev/null
+++ b/changelog/satushh-graffiti-impl.md
@@ -0,0 +1,3 @@
+### Added
+
+- Graffiti implementation based on the design doc.
\ No newline at end of file
diff --git a/changelog/t_gloas-process-attestations.md b/changelog/t_gloas-process-attestations.md
new file mode 100644
index 0000000000..303ec8cd75
--- /dev/null
+++ b/changelog/t_gloas-process-attestations.md
@@ -0,0 +1,3 @@
+### Added
+
+- Added attestation processing for Gloas
diff --git a/changelog/terencechain_add-process-execution-payload-gloas.md b/changelog/terencechain_add-process-execution-payload-gloas.md
new file mode 100644
index 0000000000..424edc6a33
--- /dev/null
+++ b/changelog/terencechain_add-process-execution-payload-gloas.md
@@ -0,0 +1,2 @@
+### Added
+- Add execution payload processing for Gloas
diff --git a/config/params/configset_test.go b/config/params/configset_test.go
index 0a724f1e8d..0c0938f2a8 100644
--- a/config/params/configset_test.go
+++ b/config/params/configset_test.go
@@ -98,6 +98,7 @@ func compareConfigs(t *testing.T, expected, actual *BeaconChainConfig) {
require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
+ require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)
diff --git a/config/params/loader_test.go b/config/params/loader_test.go
index a061f8cac8..765cf2c8aa 100644
--- a/config/params/loader_test.go
+++ b/config/params/loader_test.go
@@ -119,6 +119,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
// Initial values.
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
+ assert.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte, "%s: BuilderWithdrawalPrefixByte", name)
assert.DeepEqual(t, expected.ETH1AddressWithdrawalPrefixByte, actual.ETH1AddressWithdrawalPrefixByte, "%s: ETH1AddressWithdrawalPrefixByte", name)
// Time parameters.
diff --git a/config/params/minimal_config.go b/config/params/minimal_config.go
index 8b2cb4b44c..e64d02265e 100644
--- a/config/params/minimal_config.go
+++ b/config/params/minimal_config.go
@@ -31,6 +31,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
// Initial values
minimalConfig.BLSWithdrawalPrefixByte = byte(0)
minimalConfig.ETH1AddressWithdrawalPrefixByte = byte(1)
+ minimalConfig.BuilderWithdrawalPrefixByte = byte(3)
// Time parameters
minimalConfig.SecondsPerSlot = 6
diff --git a/config/params/testnet_config_test.go b/config/params/testnet_config_test.go
index bdd0285016..4505800d95 100644
--- a/config/params/testnet_config_test.go
+++ b/config/params/testnet_config_test.go
@@ -54,6 +54,7 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
+ require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)
diff --git a/consensus-types/blocks/signed_execution_bid.go b/consensus-types/blocks/signed_execution_bid.go
index 46063e9d9c..1f9ecccf64 100644
--- a/consensus-types/blocks/signed_execution_bid.go
+++ b/consensus-types/blocks/signed_execution_bid.go
@@ -117,7 +117,7 @@ func (h executionPayloadBidGloas) GasLimit() uint64 {
return h.payload.GasLimit
}
-// BuilderIndex returns the validator index of the builder who created this bid.
+// BuilderIndex returns the index of the builder who created this bid.
func (h executionPayloadBidGloas) BuilderIndex() primitives.BuilderIndex {
return h.payload.BuilderIndex
}
diff --git a/encoding/ssz/htrutils.go b/encoding/ssz/htrutils.go
index 9ab74ca3e1..df7d2a6101 100644
--- a/encoding/ssz/htrutils.go
+++ b/encoding/ssz/htrutils.go
@@ -5,6 +5,7 @@ import (
"encoding/binary"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
+ "github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -141,3 +142,24 @@ func withdrawalRoot(w *enginev1.Withdrawal) ([32]byte, error) {
}
return w.HashTreeRoot()
}
+
+// KzgCommitmentsRoot computes the HTR for a list of KZG commitments
+func KzgCommitmentsRoot(commitments [][]byte) ([32]byte, error) {
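+ // Each 48-byte commitment is packed into two 32-byte chunks and reduced to a single leaf root.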
+ roots := make([][32]byte, len(commitments))
+ for i, commitment := range commitments {
+ chunks, err := PackByChunk([][]byte{commitment})
+ if err != nil {
+ return [32]byte{}, err
+ }
+ roots[i] = htr.VectorizedSha256(chunks)[0]
+ }
+
+ commitmentsRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), fieldparams.MaxBlobCommitmentsPerBlock)
+ if err != nil {
+ return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
+ }
+
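+ // Mix the list length into the merkleized roots to produce the final SSZ list root.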
+ length := make([]byte, 32)
+ binary.LittleEndian.PutUint64(length[:8], uint64(len(roots)))
+ return MixInLength(commitmentsRoot, length), nil
+}
diff --git a/runtime/version/version.go b/runtime/version/version.go
index a7ba869747..7014950171 100644
--- a/runtime/version/version.go
+++ b/runtime/version/version.go
@@ -47,3 +47,13 @@ func BuildData() string {
}
return fmt.Sprintf("Prysm/%s/%s", gitTag, gitCommit)
}
+
+// GetCommitPrefix returns the first 4 characters of the git commit.
+// This is used for graffiti generation per the client identification spec.
+// Note: BuildData() must be called before this (happens at startup via Version()).
+func GetCommitPrefix() string {
+ if len(gitCommit) < 4 {
+ return gitCommit
+ }
+ return gitCommit[:4]
+}
diff --git a/testing/endtoend/evaluators/operations.go b/testing/endtoend/evaluators/operations.go
index 7b383ecbc6..7b55ff1c25 100644
--- a/testing/endtoend/evaluators/operations.go
+++ b/testing/endtoend/evaluators/operations.go
@@ -260,17 +260,21 @@ func verifyGraffitiInBlocks(_ *e2etypes.EvaluationContext, conns ...*grpc.Client
if err != nil {
return err
}
- var e bool
+ var found bool
slot := blk.Block().Slot()
graffitiInBlock := blk.Block().Body().Graffiti()
+ // Trim trailing null bytes from graffiti.
+ // Example: "SushiGEabcdPRxxxx\x00\x00\x00..." becomes "SushiGEabcdPRxxxx"
+ graffitiTrimmed := bytes.TrimRight(graffitiInBlock[:], "\x00")
for _, graffiti := range helpers.Graffiti {
- if bytes.Equal(bytesutil.PadTo([]byte(graffiti), 32), graffitiInBlock[:]) {
- e = true
+ // Check prefix match since user graffiti comes first, with EL/CL version info appended after.
+ if bytes.HasPrefix(graffitiTrimmed, []byte(graffiti)) {
+ found = true
break
}
}
- if !e && slot != 0 {
- return errors.New("could not get graffiti from the list")
+ if !found && slot != 0 {
+ return fmt.Errorf("block at slot %d has graffiti %q which does not start with any expected graffiti", slot, string(graffitiTrimmed))
}
}
diff --git a/testing/spectest/mainnet/BUILD.bazel b/testing/spectest/mainnet/BUILD.bazel
index 13bdf14db8..d4f783562d 100644
--- a/testing/spectest/mainnet/BUILD.bazel
+++ b/testing/spectest/mainnet/BUILD.bazel
@@ -201,7 +201,9 @@ go_test(
"fulu__sanity__slots_test.go",
"fulu__ssz_static__ssz_static_test.go",
"gloas__epoch_processing__process_builder_pending_payments_test.go",
+ "gloas__operations__attestation_test.go",
"gloas__operations__execution_payload_header_test.go",
+ "gloas__operations__execution_payload_test.go",
"gloas__operations__payload_attestation_test.go",
"gloas__operations__proposer_slashing_test.go",
"gloas__sanity__slots_test.go",
diff --git a/testing/spectest/mainnet/gloas__operations__attestation_test.go b/testing/spectest/mainnet/gloas__operations__attestation_test.go
new file mode 100644
index 0000000000..418e7d0dc7
--- /dev/null
+++ b/testing/spectest/mainnet/gloas__operations__attestation_test.go
@@ -0,0 +1,11 @@
+package mainnet
+
+import (
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
+)
+
+func TestMainnet_Gloas_Operations_Attestation(t *testing.T) {
+ operations.RunAttestationTest(t, "mainnet")
+}
diff --git a/testing/spectest/mainnet/gloas__operations__execution_payload_test.go b/testing/spectest/mainnet/gloas__operations__execution_payload_test.go
new file mode 100644
index 0000000000..a6305f1103
--- /dev/null
+++ b/testing/spectest/mainnet/gloas__operations__execution_payload_test.go
@@ -0,0 +1,11 @@
+package mainnet
+
+import (
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
+)
+
+func TestMainnet_Gloas_Operations_ExecutionPayloadEnvelope(t *testing.T) {
+ operations.RunExecutionPayloadTest(t, "mainnet")
+}
diff --git a/testing/spectest/minimal/BUILD.bazel b/testing/spectest/minimal/BUILD.bazel
index 0575fa6944..a72c1dab5d 100644
--- a/testing/spectest/minimal/BUILD.bazel
+++ b/testing/spectest/minimal/BUILD.bazel
@@ -207,7 +207,9 @@ go_test(
"fulu__sanity__slots_test.go",
"fulu__ssz_static__ssz_static_test.go",
"gloas__epoch_processing__process_builder_pending_payments_test.go",
+ "gloas__operations__attestation_test.go",
"gloas__operations__execution_payload_bid_test.go",
+ "gloas__operations__execution_payload_test.go",
"gloas__operations__payload_attestation_test.go",
"gloas__operations__proposer_slashing_test.go",
"gloas__sanity__slots_test.go",
diff --git a/testing/spectest/minimal/gloas__operations__attestation_test.go b/testing/spectest/minimal/gloas__operations__attestation_test.go
new file mode 100644
index 0000000000..a859b7be20
--- /dev/null
+++ b/testing/spectest/minimal/gloas__operations__attestation_test.go
@@ -0,0 +1,11 @@
+package minimal
+
+import (
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
+)
+
+func TestMinimal_Gloas_Operations_Attestation(t *testing.T) {
+ operations.RunAttestationTest(t, "minimal")
+}
diff --git a/testing/spectest/minimal/gloas__operations__execution_payload_test.go b/testing/spectest/minimal/gloas__operations__execution_payload_test.go
new file mode 100644
index 0000000000..a169c21c59
--- /dev/null
+++ b/testing/spectest/minimal/gloas__operations__execution_payload_test.go
@@ -0,0 +1,11 @@
+package minimal
+
+import (
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
+)
+
+func TestMinimal_Gloas_Operations_ExecutionPayloadEnvelope(t *testing.T) {
+ operations.RunExecutionPayloadTest(t, "minimal")
+}
diff --git a/testing/spectest/shared/gloas/operations/BUILD.bazel b/testing/spectest/shared/gloas/operations/BUILD.bazel
index 374c299e77..49ecbd72f8 100644
--- a/testing/spectest/shared/gloas/operations/BUILD.bazel
+++ b/testing/spectest/shared/gloas/operations/BUILD.bazel
@@ -4,6 +4,8 @@ go_library(
name = "go_default_library",
testonly = True,
srcs = [
+ "attestation.go",
+ "execution_payload.go",
"execution_payload_bid.go",
"helpers.go",
"payload_attestation.go",
@@ -12,12 +14,24 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
visibility = ["//visibility:public"],
deps = [
+ "//beacon-chain/core/altair:go_default_library",
+ "//beacon-chain/core/gloas:go_default_library",
+ "//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
+ "//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
+ "//testing/require:go_default_library",
"//testing/spectest/shared/common/operations:go_default_library",
+ "//testing/spectest/utils:go_default_library",
+ "//testing/util:go_default_library",
+ "@com_github_golang_snappy//:go_default_library",
+ "@com_github_google_go_cmp//cmp:go_default_library",
+ "@io_bazel_rules_go//go/tools/bazel:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//testing/protocmp:go_default_library",
],
)
diff --git a/testing/spectest/shared/gloas/operations/attestation.go b/testing/spectest/shared/gloas/operations/attestation.go
new file mode 100644
index 0000000000..64911ed826
--- /dev/null
+++ b/testing/spectest/shared/gloas/operations/attestation.go
@@ -0,0 +1,26 @@
+package operations
+
+import (
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/runtime/version"
+ common "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/common/operations"
+)
+
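+// blockWithAttestation wraps an SSZ-encoded Electra attestation in an otherwise empty signed Gloas block so the
+// common attestation test runner can feed it through the processing function.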
+func blockWithAttestation(attestationSSZ []byte) (interfaces.SignedBeaconBlock, error) {
+ att := &ethpb.AttestationElectra{}
+ if err := att.UnmarshalSSZ(attestationSSZ); err != nil {
+ return nil, err
+ }
+ b := &ethpb.BeaconBlockGloas{}
+ b.Body = &ethpb.BeaconBlockBodyGloas{Attestations: []*ethpb.AttestationElectra{att}}
+ return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockGloas{Block: b})
+}
+
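+// RunAttestationTest runs the Gloas attestation operation spec tests for the given config.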
+func RunAttestationTest(t *testing.T, config string) {
+ common.RunAttestationTest(t, config, version.String(version.Gloas), blockWithAttestation, altair.ProcessAttestationsNoVerifySignature, sszToState)
+}
diff --git a/testing/spectest/shared/gloas/operations/execution_payload.go b/testing/spectest/shared/gloas/operations/execution_payload.go
new file mode 100644
index 0000000000..cb2cf3248d
--- /dev/null
+++ b/testing/spectest/shared/gloas/operations/execution_payload.go
@@ -0,0 +1,123 @@
+package operations
+
+import (
+ "context"
+ "os"
+ "path"
+ "strings"
+ "testing"
+
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
+ "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+ "github.com/OffchainLabs/prysm/v7/config/params"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+ "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+ ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+ "github.com/OffchainLabs/prysm/v7/runtime/version"
+ "github.com/OffchainLabs/prysm/v7/testing/require"
+ "github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
+ "github.com/OffchainLabs/prysm/v7/testing/util"
+ "github.com/bazelbuild/rules_go/go/tools/bazel"
+ "github.com/golang/snappy"
+ "github.com/google/go-cmp/cmp"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/testing/protocmp"
+)
+
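+// ExecutionConfig mirrors the execution.yaml test fixture and records whether the execution engine deems the
+// payload valid.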
+type ExecutionConfig struct {
+ Valid bool `json:"execution_valid"`
+}
+
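+// sszToSignedExecutionPayloadEnvelope decodes an SSZ-encoded SignedExecutionPayloadEnvelope and wraps it in the
+// read-only envelope interface.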
+func sszToSignedExecutionPayloadEnvelope(b []byte) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
+ envelope := &ethpb.SignedExecutionPayloadEnvelope{}
+ if err := envelope.UnmarshalSSZ(b); err != nil {
+ return nil, err
+ }
+ return blocks.WrappedROSignedExecutionPayloadEnvelope(envelope)
+}
+
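+// RunExecutionPayloadTest runs the Gloas execution payload envelope operation spec tests for the given config,
+// comparing the processed pre-state against post.ssz_snappy when the fixture provides one.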
+func RunExecutionPayloadTest(t *testing.T, config string) {
+ require.NoError(t, utils.SetConfig(t, config))
+ cfg := params.BeaconConfig()
+ params.SetGenesisFork(t, cfg, version.Fulu)
+ testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "operations/execution_payload/pyspec_tests")
+ if len(testFolders) == 0 {
+ t.Fatalf("No test folders found for %s/%s/%s", config, "gloas", "operations/execution_payload/pyspec_tests")
+ }
+ for _, folder := range testFolders {
+ t.Run(folder.Name(), func(t *testing.T) {
+ helpers.ClearCache()
+
+ // Check if signed_envelope.ssz_snappy exists, skip if not
+ _, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "signed_envelope.ssz_snappy"))
+ if err != nil && strings.Contains(err.Error(), "could not locate file") {
+ t.Skipf("Skipping test %s: signed_envelope.ssz_snappy not found", folder.Name())
+ return
+ }
+
+ // Read the signed execution payload envelope
+ envelopeFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "signed_envelope.ssz_snappy")
+ require.NoError(t, err)
+ envelopeSSZ, err := snappy.Decode(nil /* dst */, envelopeFile)
+ require.NoError(t, err, "Failed to decompress envelope")
+ signedEnvelope, err := sszToSignedExecutionPayloadEnvelope(envelopeSSZ)
+ require.NoError(t, err, "Failed to unmarshal signed envelope")
+
+ preBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz_snappy")
+ require.NoError(t, err)
+ preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
+ require.NoError(t, err, "Failed to decompress")
+ preBeaconState, err := sszToState(preBeaconStateSSZ)
+ require.NoError(t, err)
+
+ postSSZFilepath, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "post.ssz_snappy"))
+ postSSZExists := true
+ if err != nil && strings.Contains(err.Error(), "could not locate file") {
+ postSSZExists = false
+ } else {
+ require.NoError(t, err)
+ }
+
+ file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "execution.yaml")
+ require.NoError(t, err)
+ execConfig := &ExecutionConfig{}
+ require.NoError(t, utils.UnmarshalYaml(file, execConfig), "Failed to Unmarshal")
+ if !execConfig.Valid {
+ t.Skip("Skipping invalid execution engine test as it's never supported")
+ }
+
+ err = gloas.ProcessExecutionPayload(context.Background(), preBeaconState, signedEnvelope)
+ if postSSZExists {
+ require.NoError(t, err)
+ comparePostState(t, postSSZFilepath, preBeaconState)
+ } else if execConfig.Valid {
+ // Note: This doesn't test anything worthwhile. It essentially tests
+ // that *any* error has occurred, not any specific error.
+ if err == nil {
+ t.Fatal("Did not fail when expected")
+ }
+ t.Logf("Expected failure; failure reason = %v", err)
+ return
+ }
+ })
+ }
+}
+
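+// comparePostState asserts that the processed state equals the expected post state stored at postSSZFilepath,
+// printing a proto diff on mismatch.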
+func comparePostState(t *testing.T, postSSZFilepath string, want state.BeaconState) {
+ postBeaconStateFile, err := os.ReadFile(postSSZFilepath) // #nosec G304
+ require.NoError(t, err)
+ postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
+ require.NoError(t, err, "Failed to decompress")
+ postBeaconState, err := sszToState(postBeaconStateSSZ)
+ require.NoError(t, err)
+ postBeaconStatePb, ok := postBeaconState.ToProtoUnsafe().(proto.Message)
+ require.Equal(t, true, ok, "post beacon state did not return a proto.Message")
+ pbState, ok := want.ToProtoUnsafe().(proto.Message)
+ require.Equal(t, true, ok, "beacon state did not return a proto.Message")
+
+ if !proto.Equal(postBeaconStatePb, pbState) {
+ diff := cmp.Diff(pbState, postBeaconStatePb, protocmp.Transform())
+ t.Fatalf("Post state does not match expected state, diff: %s", diff)
+ }
+}