Compare commits

..

1 Commit

Author  SHA1        Message              Date
Bastin  ecde60c67c  fix gen-logs.sh bug  2026-02-04 22:22:54 +01:00
185 changed files with 3365 additions and 9958 deletions

View File

@@ -1,4 +1,4 @@
version: v1.7.0-alpha.2
version: v1.7.0-alpha.1
style: full
specrefs:
@@ -146,6 +146,7 @@ exceptions:
- g1_lincomb#deneb
- hash_to_bls_field#deneb
- is_power_of_two#deneb
- multi_exp#deneb
- reverse_bits#deneb
- validate_kzg_g1#deneb
- verify_blob_kzg_proof#deneb
@@ -385,8 +386,7 @@ exceptions:
- convert_builder_index_to_validator_index#gloas
- convert_validator_index_to_builder_index#gloas
- get_attestation_score#gloas
- get_attestation_score#phase0
- get_balance_after_withdrawals#capella
- get_builder_from_deposit#gloas
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
@@ -396,18 +396,13 @@ exceptions:
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas
- is_eligible_for_partial_withdrawals#electra
- is_head_late#gloas
- is_head_weak#gloas
- is_parent_strong#gloas
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas
- should_apply_proposer_boost#gloas
- update_builder_pending_withdrawals#gloas
- update_next_withdrawal_builder_index#gloas

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.2"
consensus_spec_version = "v1.7.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
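
The per-flavor sha256-… strings and the integrity attribute are in Bazel's SRI (Subresource Integrity) form: the SHA-256 of the downloaded archive, base64-encoded and prefixed with "sha256-", presumably verified against each fetched tarball. A minimal Go sketch of recomputing such a digest from a locally downloaded archive (the file name is illustrative, and this is not part of the Bazel tooling):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Hypothetical local copy of one of the spectest archives.
	f, err := os.Open("general.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	// SRI form used by Bazel: "sha256-" + base64 of the raw digest.
	fmt.Println("sha256-" + base64.StdEncoding.EncodeToString(h.Sum(nil)))
}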

View File

@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
type SignedExecutionPayloadBid struct {

View File

@@ -2939,22 +2939,18 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
}
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
for i := range b.BlobKzgCommitments {
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
}
}
@@ -3191,30 +3187,22 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayment")
}
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
}
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
for i, commitment := range b.BlobKzgCommitments {
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
}
blobKzgCommitments[i] = kzg
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
}
return &eth.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
}, nil
}
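
The new field is decoded with bytesutil.DecodeHexWithLength against fieldparams.RootLength (32 bytes), replacing the per-commitment loop. A self-contained sketch of the same kind of strict hex-with-length check using only the standard library (the helper name is illustrative, not Prysm's implementation):

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// decodeHexWithLength is an illustrative stand-in for the validation performed
// by bytesutil.DecodeHexWithLength: require a 0x prefix, valid hex, and an
// exact decoded length (32 bytes for a root).
func decodeHexWithLength(s string, length int) ([]byte, error) {
	if !strings.HasPrefix(s, "0x") {
		return nil, fmt.Errorf("missing 0x prefix: %q", s)
	}
	b, err := hex.DecodeString(strings.TrimPrefix(s, "0x"))
	if err != nil {
		return nil, err
	}
	if len(b) != length {
		return nil, fmt.Errorf("expected %d bytes, got %d", length, len(b))
	}
	return b, nil
}

func main() {
	root := "0x" + strings.Repeat("ee", 32)
	b, err := decodeHexWithLength(root, 32)
	fmt.Println(len(b), err) // 32 <nil>
}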

View File

@@ -27,8 +27,6 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"receive_execution_payload_envelope.go",
"receive_payload_attestation_message.go",
"service.go",
"setup_forkchoice.go",
"tracked_proposer.go",
@@ -87,7 +85,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -101,16 +101,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
// this call has guaranteed to have the `headRoot` with its payload in forkchoice.
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
// TODO: Gloas, we should not include the head root in this call
if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
invalidRoots = append([][32]byte{headRoot}, invalidRoots...)
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
@@ -295,10 +290,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}

View File

@@ -465,9 +465,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G's full nodes were removed but their empty (consensus) nodes remain, as does E
require.Equal(t, true, fcs.HasNode(brf))
require.Equal(t, true, fcs.HasNode(brg))
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
@@ -703,13 +703,14 @@ func Test_reportInvalidBlock(t *testing.T) {
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'c'}, [32]byte{'a'})
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 2, len(invalidRoots))
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
@@ -784,7 +785,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
}
func Test_GetPayloadAttributeV3(t *testing.T) {
testCases := []struct {
var testCases = []struct {
name string
st bstate.BeaconState
}{
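
Two related changes run through this file and the previous one: SetOptimisticToInvalid loses its parentHash argument, and the caller-side block that prepended headRoot to the returned invalid roots is gone. The test above now expects the queried root itself (D) to come back from the store alongside its invalid ancestors (C, B), which is consistent with the store handling that normalization internally. The removed caller-side normalization, reproduced as a standalone sketch for reference (package and function names are illustrative):

package sketch

// ensureHeadFirst mirrors the caller-side normalization removed from
// notifyForkchoiceUpdate: if the forkchoice store did not already report the
// head root as invalid, prepend it so pruning also covers the head block.
func ensureHeadFirst(headRoot [32]byte, invalidRoots [][32]byte) [][32]byte {
	if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
		return append([][32]byte{headRoot}, invalidRoots...)
	}
	return invalidRoots
}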

View File

@@ -10,7 +10,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return errors.Wrap(err, "failed to get slot start time")
return err
}
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}
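
The replacement drops the io/logs package-verbosity and log-target plumbing and instead gates the verbose field set on the logger's own level. The same pattern in a minimal, self-contained form (field values are illustrative):

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()
	log.SetLevel(logrus.InfoLevel) // at Info level only the compact summary is emitted

	less := logrus.Fields{"slot": 123, "epoch": 3, "finalizedEpoch": 2}
	more := logrus.Fields{"slot": 123, "epoch": 3, "finalizedEpoch": 2, "parentRoot": "0x1234...", "version": "gloas"}

	// Verbose fields only when the logger is at debug (or trace) level,
	// mirroring the shape of the new logBlockSyncStatus.
	if log.GetLevel() >= logrus.DebugLevel {
		log.WithFields(more).Debug("Synced new block")
	} else {
		log.WithFields(less).Info("Synced new block")
	}
}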

View File

@@ -232,8 +232,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
// this call does not have the root in forkchoice yet.
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -993,9 +992,9 @@ func (s *Service) waitForSync() error {
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}
return err
}

View File

@@ -2006,7 +2006,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import block 19 to find out that the whole chain 13--18 was in fact
// invalid

View File

@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.BlockHash()))
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}

View File

@@ -1,19 +0,0 @@
package blockchain
import (
"context"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
)
// ExecutionPayloadEnvelopeReceiver interface defines the methods of chain service for receiving
// validated execution payload envelopes.
type ExecutionPayloadEnvelopeReceiver interface {
ReceiveExecutionPayloadEnvelope(context.Context, interfaces.ROSignedExecutionPayloadEnvelope) error
}
// ReceiveExecutionPayloadEnvelope accepts a signed execution payload envelope.
func (s *Service) ReceiveExecutionPayloadEnvelope(_ context.Context, _ interfaces.ROSignedExecutionPayloadEnvelope) error {
// TODO: wire into execution payload envelope processing pipeline.
return nil
}

View File

@@ -1,19 +0,0 @@
package blockchain
import (
"context"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// PayloadAttestationReceiver interface defines the methods of chain service for receiving
// validated payload attestation messages.
type PayloadAttestationReceiver interface {
ReceivePayloadAttestationMessage(context.Context, *ethpb.PayloadAttestationMessage) error
}
// ReceivePayloadAttestationMessage accepts a payload attestation message.
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error {
// TODO: Handle payload attestation message processing once Gloas is fully wired.
return nil
}

View File

@@ -757,16 +757,6 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// ReceivePayloadAttestationMessage implements the same method in the chain service.
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
return nil
}
// ReceiveExecutionPayloadEnvelope implements the same method in the chain service.
func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ interfaces.ROSignedExecutionPayloadEnvelope) error {
return nil
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -17,7 +17,6 @@ go_library(
"error.go",
"interfaces.go",
"log.go",
"payload_attestation.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
@@ -77,7 +76,6 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_attestation_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",

View File

@@ -1,53 +0,0 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// PayloadAttestationCache tracks seen payload attestation messages for a single slot.
type PayloadAttestationCache struct {
slot primitives.Slot
seen map[primitives.ValidatorIndex]struct{}
mu sync.RWMutex
}
// Seen returns true if a vote for the given slot has already been
// processed for this validator index.
func (p *PayloadAttestationCache) Seen(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
p.mu.RLock()
defer p.mu.RUnlock()
if p.slot != slot {
return false
}
if p.seen == nil {
return false
}
_, ok := p.seen[idx]
return ok
}
// Add marks the given slot and validator index as seen.
// This function assumes that the message has already been validated.
func (p *PayloadAttestationCache) Add(slot primitives.Slot, idx primitives.ValidatorIndex) error {
p.mu.Lock()
defer p.mu.Unlock()
if p.slot != slot {
p.slot = slot
p.seen = make(map[primitives.ValidatorIndex]struct{})
}
if p.seen == nil {
p.seen = make(map[primitives.ValidatorIndex]struct{})
}
p.seen[idx] = struct{}{}
return nil
}
// Clear clears the internal cache.
func (p *PayloadAttestationCache) Clear() {
p.mu.Lock()
defer p.mu.Unlock()
p.slot = 0
p.seen = nil
}

View File

@@ -1,48 +0,0 @@
package cache_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/stretchr/testify/require"
)
func TestPayloadAttestationCache_SeenAndAdd(t *testing.T) {
var c cache.PayloadAttestationCache
slot1 := primitives.Slot(1)
slot2 := primitives.Slot(2)
idx1 := primitives.ValidatorIndex(3)
idx2 := primitives.ValidatorIndex(4)
require.False(t, c.Seen(slot1, idx1))
require.NoError(t, c.Add(slot1, idx1))
require.True(t, c.Seen(slot1, idx1))
require.False(t, c.Seen(slot1, idx2))
require.False(t, c.Seen(slot2, idx1))
require.NoError(t, c.Add(slot1, idx2))
require.True(t, c.Seen(slot1, idx1))
require.True(t, c.Seen(slot1, idx2))
require.NoError(t, c.Add(slot2, idx1))
require.True(t, c.Seen(slot2, idx1))
require.False(t, c.Seen(slot1, idx1))
require.False(t, c.Seen(slot1, idx2))
}
func TestPayloadAttestationCache_Clear(t *testing.T) {
var c cache.PayloadAttestationCache
slot := primitives.Slot(10)
idx := primitives.ValidatorIndex(42)
require.NoError(t, c.Add(slot, idx))
require.True(t, c.Seen(slot, idx))
c.Clear()
require.False(t, c.Seen(slot, idx))
require.NoError(t, c.Add(slot, idx))
require.True(t, c.Seen(slot, idx))
}

View File

@@ -20,7 +20,6 @@ go_library(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -76,11 +75,7 @@ func ProcessAttestationNoVerifySignature(
return nil, err
}
if err := beaconState.UpdatePendingPaymentWeight(att, indices, participatedFlags); err != nil {
return nil, errors.Wrap(err, "failed to update pending payment weight")
}
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance, att)
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
}
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
@@ -110,9 +105,7 @@ func SetParticipationAndRewardProposer(
beaconState state.BeaconState,
targetEpoch primitives.Epoch,
indices []uint64,
participatedFlags map[uint8]bool,
totalBalance uint64,
att ethpb.Att) (state.BeaconState, error) {
participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
var proposerRewardNumerator uint64
currentEpoch := time.CurrentEpoch(beaconState)
var stateErr error
@@ -306,19 +299,6 @@ func AttestationParticipationFlagIndices(beaconState state.ReadOnlyBeaconState,
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt
var beaconBlockRoot [32]byte
copy(beaconBlockRoot[:], data.BeaconBlockRoot)
matchingPayload, err := gloas.MatchingPayload(
beaconState,
beaconBlockRoot,
data.Slot,
uint64(data.CommitteeIndex),
)
if err != nil {
return nil, err
}
matchedSrcTgtHead = matchedSrcTgtHead && matchingPayload
if matchedSrcTgtHead && delay == cfg.MinAttestationInclusionDelay {
participatedFlags[headFlagIndex] = true
}

View File

@@ -1,9 +1,7 @@
package altair_test
import (
"bytes"
"fmt"
"reflect"
"testing"
"github.com/OffchainLabs/go-bitfield"
@@ -558,7 +556,7 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b, &ethpb.Attestation{})
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
require.NoError(t, err)
i, err := helpers.BeaconProposerIndex(t.Context(), st)
@@ -777,67 +775,11 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
headFlagIndex: true,
},
},
{
name: "gloas same-slot committee index non-zero errors",
inputState: func() state.BeaconState {
stateSlot := primitives.Slot(5)
slot := primitives.Slot(3)
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
headRoot := bytes.Repeat([]byte{0xBB}, 32)
prevRoot := bytes.Repeat([]byte{0xCC}, 32)
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, prevRoot, 0, 0)
}(),
inputData: &ethpb.AttestationData{
Slot: 3,
CommitteeIndex: 1, // invalid for same-slot
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: bytes.Repeat([]byte{0xAA}, 32),
},
},
inputDelay: 1,
participationIndices: nil,
},
{
name: "gloas payload availability matches committee index",
inputState: func() state.BeaconState {
stateSlot := primitives.Slot(5)
slot := primitives.Slot(3)
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
headRoot := bytes.Repeat([]byte{0xBB}, 32)
// Same prev root to make SameSlotAttestation false and use payload availability.
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, headRoot, 1, slot)
}(),
inputData: &ethpb.AttestationData{
Slot: 3,
CommitteeIndex: 1,
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: bytes.Repeat([]byte{0xAA}, 32),
},
},
inputDelay: 1,
participationIndices: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: true,
},
},
}
for _, test := range tests {
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
if test.participationIndices == nil {
require.ErrorContains(t, "committee index", err)
continue
}
require.NoError(t, err)
if !reflect.DeepEqual(test.participationIndices, flagIndices) {
t.Fatalf("unexpected participation indices: got %v want %v", flagIndices, test.participationIndices)
}
require.DeepEqual(t, test.participationIndices, flagIndices)
}
}
@@ -916,61 +858,3 @@ func TestMatchingStatus(t *testing.T) {
require.Equal(t, test.matchedHead, head)
}
}
func buildGloasStateForFlags(t *testing.T, stateSlot, slot primitives.Slot, targetRoot, headRoot, prevRoot []byte, availabilityBit uint8, availabilitySlot primitives.Slot) state.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
blockRoots[0] = targetRoot
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = headRoot
blockRoots[(slot-1)%cfg.SlotsPerHistoricalRoot] = prevRoot
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range stateRoots {
stateRoots[i] = make([]byte, fieldparams.RootLength)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = make([]byte, fieldparams.RootLength)
}
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
idx := availabilitySlot % cfg.SlotsPerHistoricalRoot
byteIndex := idx / 8
bitIndex := idx % 8
if availabilityBit == 1 {
execPayloadAvailability[byteIndex] |= 1 << bitIndex
}
checkpointRoot := bytes.Repeat([]byte{0xDD}, fieldparams.RootLength)
justified := &ethpb.Checkpoint{Root: checkpointRoot}
stProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
ExecutionPayloadAvailability: execPayloadAvailability,
CurrentJustifiedCheckpoint: justified,
PreviousJustifiedCheckpoint: justified,
Validators: []*ethpb.Validator{
{
EffectiveBalance: cfg.MinActivationBalance,
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
},
},
Balances: []uint64{cfg.MinActivationBalance},
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
Epoch: 0,
},
}
beaconState, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return beaconState
}
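
The removed helper packs per-slot payload-availability flags into a byte slice, one bit per slot modulo SLOTS_PER_HISTORICAL_ROOT. The index arithmetic in isolation, with the mainnet constant written as a plain literal for illustration:

package main

import "fmt"

const slotsPerHistoricalRoot = 8192 // mainnet SLOTS_PER_HISTORICAL_ROOT, used here as an illustrative literal

// setAvailability sets the bit for the given slot: byte idx/8, bit idx%8,
// matching the byteIndex/bitIndex computation in the removed test helper.
func setAvailability(bits []byte, slot uint64) {
	idx := slot % slotsPerHistoricalRoot
	bits[idx/8] |= 1 << (idx % 8)
}

// getAvailability reads the same bit back as 0 or 1.
func getAvailability(bits []byte, slot uint64) uint64 {
	idx := slot % slotsPerHistoricalRoot
	return uint64(bits[idx/8]>>(idx%8)) & 1
}

func main() {
	bits := make([]byte, slotsPerHistoricalRoot/8)
	setAvailability(bits, 4)
	fmt.Println(getAvailability(bits, 4), getAvailability(bits, 5)) // 1 0
}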

View File

@@ -111,21 +111,10 @@ func VerifyAttestationNoVerifySignature(
var indexedAtt ethpb.IndexedAtt
if att.Version() >= version.Electra {
ci := att.GetData().CommitteeIndex
// Spec v1.7.0-alpha pseudocode:
//
// # [Modified in Gloas:EIP7732]
// assert data.index < 2
//
if beaconState.Version() >= version.Gloas {
if ci >= 2 {
return fmt.Errorf("incorrect committee index %d", ci)
}
} else {
if ci != 0 {
return errors.New("committee index must be 0 between Electra and Gloas forks")
}
if att.GetData().CommitteeIndex != 0 {
return errors.New("committee index must be 0 post-Electra")
}
aggBits := att.GetAggregationBits()
committeeIndices := att.CommitteeBitsVal().BitIndices()
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
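
The guard reverts from the Gloas rule (committee index < 2) to the single post-Electra rule (committee index == 0). The two variants as a standalone predicate (package and function names are illustrative):

package sketch

// validCommitteeIndex captures the two guard variants seen in this hunk:
// post-Electra attestations must carry committee index 0; the removed Gloas
// branch additionally allowed index 1.
func validCommitteeIndex(ci uint64, gloas bool) bool {
	if gloas {
		return ci < 2
	}
	return ci == 0
}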

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"bytes"
"context"
"testing"
@@ -263,7 +262,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
CommitteeBits: bitfield.NewBitvector64(),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "committee index must be 0", err)
assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
})
t.Run("index of committee too big", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
@@ -315,75 +314,6 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
})
}
func TestVerifyAttestationNoVerifySignature_GloasCommitteeIndexLimit(t *testing.T) {
cfg := params.BeaconConfig()
stateSlot := cfg.MinAttestationInclusionDelay + 1
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = make([]byte, fieldparams.RootLength)
}
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range stateRoots {
stateRoots[i] = make([]byte, fieldparams.RootLength)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = make([]byte, fieldparams.RootLength)
}
checkpointRoot := bytes.Repeat([]byte{0xAA}, fieldparams.RootLength)
justified := &ethpb.Checkpoint{Epoch: 0, Root: checkpointRoot}
gloasStateProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
CurrentJustifiedCheckpoint: justified,
PreviousJustifiedCheckpoint: justified,
Validators: []*ethpb.Validator{
{
EffectiveBalance: cfg.MinActivationBalance,
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
},
},
Balances: []uint64{cfg.MinActivationBalance},
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
Epoch: 0,
},
}
beaconState, err := state_native.InitializeFromProtoGloas(gloasStateProto)
require.NoError(t, err)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(0, true)
aggBits := bitfield.NewBitlist(1)
aggBits.SetBitAt(0, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Slot: 0,
CommitteeIndex: 2, // invalid for Gloas (must be <2)
BeaconBlockRoot: blockRoots[0],
Source: justified,
Target: justified,
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
Signature: bytes.Repeat([]byte{0x00}, fieldparams.BLSSignatureLength),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "incorrect committee index 2", err)
}
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
@@ -653,7 +583,6 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
}
func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing.T) {
helpers.ClearCache()
ctx := t.Context()
numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))
validators := make([]*ethpb.Validator, numOfValidators)

View File

@@ -3,11 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"attestation.go",
"bid.go",
"deposit_request.go",
"log.go",
"payload.go",
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
@@ -16,7 +12,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -30,23 +25,17 @@ go_library(
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attestation_test.go",
"bid_test.go",
"deposit_request_test.go",
"payload_attestation_test.go",
"payload_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
],
@@ -56,7 +45,6 @@ go_test(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -1,52 +0,0 @@
package gloas
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
)
// MatchingPayload returns true if the attestation's committee index matches the expected payload index.
//
// For pre-Gloas forks, this always returns true.
//
// Spec v1.7.0-alpha (pseudocode):
//
// # [New in Gloas:EIP7732]
// if is_attestation_same_slot(state, data):
// assert data.index == 0
// payload_matches = True
// else:
// slot_index = data.slot % SLOTS_PER_HISTORICAL_ROOT
// payload_index = state.execution_payload_availability[slot_index]
// payload_matches = data.index == payload_index
func MatchingPayload(
beaconState state.ReadOnlyBeaconState,
beaconBlockRoot [32]byte,
slot primitives.Slot,
committeeIndex uint64,
) (bool, error) {
if beaconState.Version() < version.Gloas {
return true, nil
}
sameSlot, err := beaconState.IsAttestationSameSlot(beaconBlockRoot, slot)
if err != nil {
return false, errors.Wrap(err, "failed to get same slot attestation status")
}
if sameSlot {
if committeeIndex != 0 {
return false, fmt.Errorf("committee index %d for same slot attestation must be 0", committeeIndex)
}
return true, nil
}
executionPayloadAvail, err := beaconState.ExecutionPayloadAvailability(slot)
if err != nil {
return false, errors.Wrap(err, "failed to get execution payload availability status")
}
return executionPayloadAvail == committeeIndex, nil
}

View File

@@ -1,110 +0,0 @@
package gloas
import (
"bytes"
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func buildStateWithBlockRoots(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for slot, root := range roots {
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
}
stProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
BlockRoots: blockRoots,
}
state, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return state.(*state_native.BeaconState)
}
func TestMatchingPayload(t *testing.T) {
t.Run("pre-gloas always true", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
ok, err := MatchingPayload(stIface, [32]byte{}, 0, 123)
require.NoError(t, err)
require.Equal(t, true, ok)
})
t.Run("same slot requires committee index 0", func(t *testing.T) {
root := bytes.Repeat([]byte{0xAA}, 32)
state := buildStateWithBlockRoots(t, 6, map[primitives.Slot][]byte{
4: root,
3: bytes.Repeat([]byte{0xBB}, 32),
})
var rootArr [32]byte
copy(rootArr[:], root)
ok, err := MatchingPayload(state, rootArr, 4, 1)
require.ErrorContains(t, "committee index", err)
require.Equal(t, false, ok)
})
t.Run("same slot matches when committee index is 0", func(t *testing.T) {
root := bytes.Repeat([]byte{0xAA}, 32)
state := buildStateWithBlockRoots(t, 6, map[primitives.Slot][]byte{
4: root,
3: bytes.Repeat([]byte{0xBB}, 32),
})
var rootArr [32]byte
copy(rootArr[:], root)
ok, err := MatchingPayload(state, rootArr, 4, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
})
t.Run("non same slot checks payload availability", func(t *testing.T) {
cfg := params.BeaconConfig()
root := bytes.Repeat([]byte{0xAA}, 32)
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
blockRoots[4%cfg.SlotsPerHistoricalRoot] = bytes.Repeat([]byte{0xCC}, 32)
blockRoots[3%cfg.SlotsPerHistoricalRoot] = bytes.Repeat([]byte{0xBB}, 32)
availability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
slotIndex := uint64(4)
availability[slotIndex/8] = byte(1 << (slotIndex % 8))
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Slot: 6,
BlockRoots: blockRoots,
ExecutionPayloadAvailability: availability,
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x66}, 4),
PreviousVersion: bytes.Repeat([]byte{0x66}, 4),
Epoch: 0,
},
})
require.NoError(t, err)
state := stIface.(*state_native.BeaconState)
require.Equal(t, version.Gloas, state.Version())
var rootArr [32]byte
copy(rootArr[:], root)
ok, err := MatchingPayload(state, rootArr, 4, 1)
require.NoError(t, err)
require.Equal(t, true, ok)
ok, err = MatchingPayload(state, rootArr, 4, 0)
require.NoError(t, err)
require.Equal(t, false, ok)
})
}

View File

@@ -18,7 +18,7 @@ import (
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// <spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
@@ -37,12 +37,6 @@ import (
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify commitments are under limit
// assert (
// len(bid.blob_kzg_commitments)
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
// )
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
@@ -115,12 +109,6 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
}
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
commitmentCount := bid.BlobKzgCommitmentCount()
if commitmentCount > uint64(maxBlobsPerBlock) {
return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}

View File

@@ -184,28 +184,6 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
return out
}
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
if count > max {
count = max
}
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
count := max + 1
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
@@ -216,17 +194,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -258,16 +236,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -302,17 +280,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -363,17 +341,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -416,17 +394,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -458,17 +436,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
@@ -485,42 +463,6 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
slot := primitives.Slot(9)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "blob KZG commitments over max", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
@@ -536,17 +478,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -578,17 +520,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -621,17 +563,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -663,17 +605,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)

View File

@@ -1,180 +0,0 @@
package gloas
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func processDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) error {
if len(requests) == 0 {
return nil
}
for _, receipt := range requests {
if err := processDepositRequest(beaconState, receipt); err != nil {
return errors.Wrap(err, "could not apply deposit request")
}
}
return nil
}
// processDepositRequest processes the specific deposit request
//
// <spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
// # [New in Gloas:EIP7732]
// builder_pubkeys = [b.pubkey for b in state.builders]
// validator_pubkeys = [v.pubkey for v in state.validators]
//
// # [New in Gloas:EIP7732]
// # Regardless of the withdrawal credentials prefix, if a builder/validator
// # already exists with this pubkey, apply the deposit to their balance
// is_builder = deposit_request.pubkey in builder_pubkeys
// is_validator = deposit_request.pubkey in validator_pubkeys
// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
// if is_builder or (is_builder_prefix and not is_validator):
// # Apply builder deposits immediately
// apply_deposit_for_builder(
// state,
// deposit_request.pubkey,
// deposit_request.withdrawal_credentials,
// deposit_request.amount,
// deposit_request.signature,
// state.slot,
// )
// return
//
// # Add validator deposits to the queue
// state.pending_deposits.append(
// PendingDeposit(
// pubkey=deposit_request.pubkey,
// withdrawal_credentials=deposit_request.withdrawal_credentials,
// amount=deposit_request.amount,
// signature=deposit_request.signature,
// slot=state.slot,
// )
// )
// </spec>
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
if request == nil {
return errors.New("nil deposit request")
}
applied, err := applyBuilderDepositRequest(beaconState, request)
if err != nil {
return errors.Wrap(err, "could not apply builder deposit")
}
if applied {
return nil
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: request.Pubkey,
WithdrawalCredentials: request.WithdrawalCredentials,
Amount: request.Amount,
Signature: request.Signature,
Slot: beaconState.Slot(),
}); err != nil {
return errors.Wrap(err, "could not append deposit request")
}
return nil
}
// <spec fn="apply_deposit_for_builder" fork="gloas" hash="e4bc98c7">
// def apply_deposit_for_builder(
//
// state: BeaconState,
// pubkey: BLSPubkey,
// withdrawal_credentials: Bytes32,
// amount: uint64,
// signature: BLSSignature,
// slot: Slot,
//
// ) -> None:
//
// builder_pubkeys = [b.pubkey for b in state.builders]
// if pubkey not in builder_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
// add_builder_to_registry(state, pubkey, withdrawal_credentials, amount, slot)
// else:
// # Increase balance by deposit amount
// builder_index = builder_pubkeys.index(pubkey)
// state.builders[builder_index].balance += amount
//
// </spec>
func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (bool, error) {
if beaconState.Version() < version.Gloas {
return false, nil
}
pubkey := bytesutil.ToBytes48(request.Pubkey)
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
isBuilderPrefix := IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
if !isBuilder && (!isBuilderPrefix || isValidator) {
return false, nil
}
if isBuilder {
if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
return false, err
}
return true, nil
}
if err := applyDepositForNewBuilder(
beaconState,
request.Pubkey,
request.WithdrawalCredentials,
request.Amount,
request.Signature,
); err != nil {
return false, err
}
return true, nil
}
func applyDepositForNewBuilder(
beaconState state.BeaconState,
pubkey []byte,
withdrawalCredentials []byte,
amount uint64,
signature []byte,
) error {
pubkeyBytes := bytesutil.ToBytes48(pubkey)
valid, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
PublicKey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: signature,
})
if err != nil {
return errors.Wrap(err, "could not verify deposit signature")
}
if !valid {
log.WithFields(logrus.Fields{
"pubkey": fmt.Sprintf("%x", pubkey),
}).Warn("ignoring builder deposit: invalid signature")
return nil
}
withdrawalCredBytes := bytesutil.ToBytes32(withdrawalCredentials)
return beaconState.AddBuilderFromDeposit(pubkeyBytes, withdrawalCredBytes, amount)
}
func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
return len(withdrawalCredentials) == fieldparams.RootLength &&
withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
}
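A hedged sketch of the 32-byte credential layout this prefix check assumes: the builder prefix in byte 0, zero padding, then the 20-byte execution-layer address in bytes 12-31. The helper name is hypothetical and not part of this change:
func exampleBuilderCredential(executionAddress [20]byte) [32]byte {
    // Byte 0: builder withdrawal prefix; bytes 1-11: zero; bytes 12-31: execution address.
    var cred [32]byte
    cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
    copy(cred[12:], executionAddress[:])
    return cred
}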

View File

@@ -1,150 +0,0 @@
package gloas
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProcessDepositRequests_EmptyAndNil(t *testing.T) {
st := newGloasState(t, nil, nil)
t.Run("empty requests continues", func(t *testing.T) {
err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
require.NoError(t, err)
})
t.Run("nil request errors", func(t *testing.T) {
err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "nil deposit request", err)
})
}
func TestProcessDepositRequest_BuilderDepositAddsBuilder(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
cred := builderWithdrawalCredentials()
pd := stateTesting.GeneratePendingDeposit(t, sk, 1234, cred, 0)
req := depositRequestFromPending(pd, 1)
st := newGloasState(t, nil, nil)
err = processDepositRequest(st, req)
require.NoError(t, err)
idx, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
require.Equal(t, true, ok)
builder, err := st.Builder(idx)
require.NoError(t, err)
require.NotNil(t, builder)
require.DeepEqual(t, req.Pubkey, builder.Pubkey)
require.DeepEqual(t, []byte{cred[0]}, builder.Version)
require.DeepEqual(t, cred[12:], builder.ExecutionAddress)
require.Equal(t, uint64(1234), uint64(builder.Balance))
require.Equal(t, params.BeaconConfig().FarFutureEpoch, builder.WithdrawableEpoch)
pending, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pending))
}
func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
pubkey := sk.PublicKey().Marshal()
builders := []*ethpb.Builder{
{
Pubkey: pubkey,
Version: []byte{0},
ExecutionAddress: bytes.Repeat([]byte{0x11}, 20),
Balance: 5,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
st := newGloasState(t, nil, builders)
cred := validatorWithdrawalCredentials()
pd := stateTesting.GeneratePendingDeposit(t, sk, 200, cred, 0)
req := depositRequestFromPending(pd, 9)
err = processDepositRequest(st, req)
require.NoError(t, err)
idx, ok := st.BuilderIndexByPubkey(toBytes48(pubkey))
require.Equal(t, true, ok)
builder, err := st.Builder(idx)
require.NoError(t, err)
require.Equal(t, uint64(205), uint64(builder.Balance))
pending, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pending))
}
func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
cred := builderWithdrawalCredentials()
st := newGloasState(t, nil, nil)
err = applyDepositForNewBuilder(st, sk.PublicKey().Marshal(), cred[:], 100, make([]byte, 96))
require.NoError(t, err)
_, ok := st.BuilderIndexByPubkey(toBytes48(sk.PublicKey().Marshal()))
require.Equal(t, false, ok)
}
func newGloasState(t *testing.T, validators []*ethpb.Validator, builders []*ethpb.Builder) state.BeaconState {
t.Helper()
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
Validators: validators,
Balances: make([]uint64, len(validators)),
PendingDeposits: []*ethpb.PendingDeposit{},
Builders: builders,
})
require.NoError(t, err)
return st
}
func depositRequestFromPending(pd *ethpb.PendingDeposit, index uint64) *enginev1.DepositRequest {
return &enginev1.DepositRequest{
Pubkey: pd.PublicKey,
WithdrawalCredentials: pd.WithdrawalCredentials,
Amount: pd.Amount,
Signature: pd.Signature,
Index: index,
}
}
func builderWithdrawalCredentials() [32]byte {
var cred [32]byte
cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
copy(cred[12:], bytes.Repeat([]byte{0x22}, 20))
return cred
}
func validatorWithdrawalCredentials() [32]byte {
var cred [32]byte
cred[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
copy(cred[12:], bytes.Repeat([]byte{0x33}, 20))
return cred
}
func toBytes48(b []byte) [48]byte {
var out [48]byte
copy(out[:], b)
return out
}

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package gloas
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/gloas")

View File

@@ -1,361 +0,0 @@
package gloas
import (
"bytes"
"context"
"fmt"
requests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
//
// <spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
// def process_execution_payload(
// state: BeaconState,
// # [Modified in Gloas:EIP7732]
// # Removed `body`
// # [New in Gloas:EIP7732]
// signed_envelope: SignedExecutionPayloadEnvelope,
// execution_engine: ExecutionEngine,
// # [New in Gloas:EIP7732]
// verify: bool = True,
// ) -> None:
// envelope = signed_envelope.message
// payload = envelope.payload
//
// # Verify signature
// if verify:
// assert verify_execution_payload_envelope_signature(state, signed_envelope)
//
// # Cache latest block header state root
// previous_state_root = hash_tree_root(state)
// if state.latest_block_header.state_root == Root():
// state.latest_block_header.state_root = previous_state_root
//
// # Verify consistency with the beacon block
// assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
// assert envelope.slot == state.slot
//
// # Verify consistency with the committed bid
// committed_bid = state.latest_execution_payload_bid
// assert envelope.builder_index == committed_bid.builder_index
// assert committed_bid.prev_randao == payload.prev_randao
//
// # Verify consistency with expected withdrawals
// assert hash_tree_root(payload.withdrawals) == hash_tree_root(state.payload_expected_withdrawals)
//
// # Verify the gas_limit
// assert committed_bid.gas_limit == payload.gas_limit
// # Verify the block hash
// assert committed_bid.block_hash == payload.block_hash
// # Verify consistency of the parent hash with respect to the previous execution payload
// assert payload.parent_hash == state.latest_block_hash
// # Verify timestamp
// assert payload.timestamp == compute_time_at_slot(state, state.slot)
// # Verify the execution payload is valid
// versioned_hashes = [
// kzg_commitment_to_versioned_hash(commitment)
// # [Modified in Gloas:EIP7732]
// for commitment in committed_bid.blob_kzg_commitments
// ]
// requests = envelope.execution_requests
// assert execution_engine.verify_and_notify_new_payload(
// NewPayloadRequest(
// execution_payload=payload,
// versioned_hashes=versioned_hashes,
// parent_beacon_block_root=state.latest_block_header.parent_root,
// execution_requests=requests,
// )
// )
//
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
//
// for_ops(requests.deposits, process_deposit_request)
// for_ops(requests.withdrawals, process_withdrawal_request)
// for_ops(requests.consolidations, process_consolidation_request)
//
// # Queue the builder payment
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
// amount = payment.withdrawal.amount
// if amount > 0:
// state.builder_pending_withdrawals.append(payment.withdrawal)
// state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = (
// BuilderPendingPayment()
// )
//
// # Cache the execution payload hash
// state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1
// state.latest_block_hash = payload.block_hash
//
// # Verify the state root
// if verify:
// assert envelope.state_root == hash_tree_root(state)
// </spec>
func ProcessExecutionPayload(
ctx context.Context,
st state.BeaconState,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) error {
if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
return errors.Wrap(err, "signature verification failed")
}
envelope, err := signedEnvelope.Envelope()
if err != nil {
return errors.Wrap(err, "could not get envelope from signed envelope")
}
if err := ApplyExecutionPayload(ctx, st, envelope); err != nil {
return err
}
r, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get hash tree root")
}
if r != envelope.StateRoot() {
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
}
return nil
}
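Because signature verification is separated from state mutation, a caller that only needs the post-payload state root (for example, to populate the envelope's state root before signing) can reuse ApplyExecutionPayload on a copy of the state. A minimal sketch under that assumption; the helper name is hypothetical and not part of this change:
func expectedPostPayloadRoot(ctx context.Context, st state.BeaconState, env interfaces.ROExecutionPayloadEnvelope) ([32]byte, error) {
    // Work on a copy so the caller's state is left untouched.
    copied := st.Copy()
    if err := ApplyExecutionPayload(ctx, copied, env); err != nil {
        return [32]byte{}, err
    }
    return copied.HashTreeRoot(ctx)
}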
// ApplyExecutionPayload applies the execution payload envelope to the state and performs the same
// consistency checks as the full processing path. This keeps the post-payload state root computation
// on a shared code path, even though some bid/payload checks are not strictly required for the root itself.
func ApplyExecutionPayload(
ctx context.Context,
st state.BeaconState,
envelope interfaces.ROExecutionPayloadEnvelope,
) error {
latestHeader := st.LatestBlockHeader()
if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
previousStateRoot, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not compute state root")
}
latestHeader.StateRoot = previousStateRoot[:]
if err := st.SetLatestBlockHeader(latestHeader); err != nil {
return errors.Wrap(err, "could not set latest block header")
}
}
blockHeaderRoot, err := latestHeader.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute block header root")
}
beaconBlockRoot := envelope.BeaconBlockRoot()
if !bytes.Equal(beaconBlockRoot[:], blockHeaderRoot[:]) {
return errors.Errorf("envelope beacon block root does not match state latest block header root: envelope=%#x, header=%#x", beaconBlockRoot, blockHeaderRoot)
}
if envelope.Slot() != st.Slot() {
return errors.Errorf("envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get latest execution payload bid")
}
if latestBid == nil {
return errors.New("latest execution payload bid is nil")
}
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
return errors.Errorf("envelope builder index does not match committed bid builder index: envelope=%d, bid=%d", envelope.BuilderIndex(), latestBid.BuilderIndex())
}
payload, err := envelope.Execution()
if err != nil {
return errors.Wrap(err, "could not get execution payload from envelope")
}
latestBidPrevRandao := latestBid.PrevRandao()
if !bytes.Equal(payload.PrevRandao(), latestBidPrevRandao[:]) {
return errors.Errorf("payload prev randao does not match committed bid prev randao: payload=%#x, bid=%#x", payload.PrevRandao(), latestBidPrevRandao)
}
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals from payload")
}
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
if err != nil {
return errors.Wrap(err, "could not validate payload withdrawals")
}
if !ok {
return errors.New("payload withdrawals do not match expected withdrawals")
}
if latestBid.GasLimit() != payload.GasLimit() {
return errors.Errorf("committed bid gas limit does not match payload gas limit: bid=%d, payload=%d", latestBid.GasLimit(), payload.GasLimit())
}
bidBlockHash := latestBid.BlockHash()
payloadBlockHash := payload.BlockHash()
if !bytes.Equal(bidBlockHash[:], payloadBlockHash) {
return errors.Errorf("committed bid block hash does not match payload block hash: bid=%#x, payload=%#x", bidBlockHash, payloadBlockHash)
}
latestBlockHash, err := st.LatestBlockHash()
if err != nil {
return errors.Wrap(err, "could not get latest block hash")
}
if !bytes.Equal(payload.ParentHash(), latestBlockHash[:]) {
return errors.Errorf("payload parent hash does not match state latest block hash: payload=%#x, state=%#x", payload.ParentHash(), latestBlockHash)
}
t, err := slots.StartTime(st.GenesisTime(), st.Slot())
if err != nil {
return errors.Wrap(err, "could not compute timestamp")
}
if payload.Timestamp() != uint64(t.Unix()) {
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
}
if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
return errors.Wrap(err, "could not process execution requests")
}
if err := st.QueueBuilderPayment(); err != nil {
return errors.Wrap(err, "could not queue builder payment")
}
if err := st.SetExecutionPayloadAvailability(st.Slot(), true); err != nil {
return errors.Wrap(err, "could not set execution payload availability")
}
if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
return errors.Wrap(err, "could not set latest block hash")
}
return nil
}
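The builder payment queued by QueueBuilderPayment lives at a fixed offset of the two-epoch builder_pending_payments vector; the spec expression quoted above, SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH, reduces to a one-liner. A sketch for orientation (hypothetical helper, not part of this change):
func pendingPaymentIndex(slot primitives.Slot) primitives.Slot {
    // Payments recorded during the current epoch land in the second half of the vector.
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    return slotsPerEpoch + slot%slotsPerEpoch
}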
func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
return proposerPublicKey(st)
}
return builderPublicKey(st, builderIdx)
}
func proposerPublicKey(st state.BeaconState) (bls.PublicKey, error) {
header := st.LatestBlockHeader()
if header == nil {
return nil, fmt.Errorf("latest block header is nil")
}
proposerPubkey := st.PubkeyAtIndex(header.ProposerIndex)
publicKey, err := bls.PublicKeyFromBytes(proposerPubkey[:])
if err != nil {
return nil, fmt.Errorf("invalid proposer public key: %w", err)
}
return publicKey, nil
}
func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
builder, err := st.Builder(builderIdx)
if err != nil {
return nil, fmt.Errorf("failed to get builder: %w", err)
}
if builder == nil {
return nil, fmt.Errorf("builder at index %d not found", builderIdx)
}
publicKey, err := bls.PublicKeyFromBytes(builder.Pubkey)
if err != nil {
return nil, fmt.Errorf("invalid builder public key: %w", err)
}
return publicKey, nil
}
// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
// for op in requests.deposits: process_deposit_request(state, op)
// for op in requests.withdrawals: process_withdrawal_request(state, op)
// for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
return errors.Wrap(err, "could not process deposit requests")
}
var err error
st, err = requests.ProcessWithdrawalRequests(ctx, st, rqs.Withdrawals)
if err != nil {
return errors.Wrap(err, "could not process withdrawal requests")
}
err = requests.ProcessConsolidationRequests(ctx, st, rqs.Consolidations)
if err != nil {
return errors.Wrap(err, "could not process consolidation requests")
}
return nil
}
// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
// Spec v1.7.0-alpha.0 (pseudocode):
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
//     validator_index = state.latest_block_header.proposer_index
//     pubkey = state.validators[validator_index].pubkey
// else:
//     pubkey = state.builders[builder_index].pubkey
// signing_root = compute_signing_root(signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER))
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return err
}
signatureBytes := signedEnvelope.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return fmt.Errorf("invalid signature format: %w", err)
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return fmt.Errorf("failed to compute signing root: %w", err)
}
if !signature.Verify(publicKey, signingRoot[:]) {
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
}
return nil
}
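For orientation, the producer side of the same check: the builder (or a self-building proposer) signs the envelope over the identical DOMAIN_BEACON_BUILDER signing root. A hedged sketch; it additionally assumes the ethpb protobuf import used elsewhere in this package, and the helper name is hypothetical rather than part of this change:
func signEnvelope(st state.BeaconState, env *ethpb.ExecutionPayloadEnvelope, sk bls.SecretKey) ([]byte, error) {
    // Same domain and signing root as the verification path above.
    domain, err := signing.Domain(st.Fork(), slots.ToEpoch(env.Slot), params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
    if err != nil {
        return nil, err
    }
    root, err := signing.ComputeSigningRoot(env, domain)
    if err != nil {
        return nil, err
    }
    return sk.Sign(root[:]).Marshal(), nil
}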

View File

@@ -77,7 +77,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body
// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
committee, err := payloadCommittee(ctx, st, att.Data.Slot)
if err != nil {
return nil, err
}
@@ -96,7 +96,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}, nil
}
// PayloadCommittee returns the payload timeliness committee for a given slot for the state.
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
//
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
@@ -115,7 +115,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
if err != nil {
@@ -264,24 +264,24 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
//
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
// def is_valid_indexed_payload_attestation(
// state: BeaconState, attestation: IndexedPayloadAttestation
// state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
// ) -> bool:
// """
// Check if ``attestation`` is non-empty, has sorted indices, and has
// Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
// a valid aggregate signature.
// """
// # Verify indices are non-empty and sorted
// indices = attestation.attesting_indices
// indices = indexed_payload_attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(indices):
// return False
//
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
// signing_root = compute_signing_root(attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
// signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
// </spec>
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices

View File

@@ -1,349 +0,0 @@
package gloas
import (
"bytes"
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/time/slots"
"google.golang.org/protobuf/proto"
)
type payloadFixture struct {
state state.BeaconState
signed interfaces.ROSignedExecutionPayloadEnvelope
signedProto *ethpb.SignedExecutionPayloadEnvelope
envelope *ethpb.ExecutionPayloadEnvelope
payload *enginev1.ExecutionPayloadDeneb
slot primitives.Slot
}
func buildPayloadFixture(t *testing.T, mutate func(payload *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, envelope *ethpb.ExecutionPayloadEnvelope)) payloadFixture {
t.Helper()
cfg := params.BeaconConfig()
slot := primitives.Slot(5)
builderIdx := primitives.BuilderIndex(0)
sk, err := bls.RandKey()
require.NoError(t, err)
pk := sk.PublicKey().Marshal()
randao := bytes.Repeat([]byte{0xAA}, 32)
parentHash := bytes.Repeat([]byte{0xBB}, 32)
blockHash := bytes.Repeat([]byte{0xCC}, 32)
withdrawals := []*enginev1.Withdrawal{
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 0},
}
payload := &enginev1.ExecutionPayloadDeneb{
ParentHash: parentHash,
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
StateRoot: bytes.Repeat([]byte{0x02}, 32),
ReceiptsRoot: bytes.Repeat([]byte{0x03}, 32),
LogsBloom: bytes.Repeat([]byte{0x04}, 256),
PrevRandao: randao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 0,
Timestamp: 100,
ExtraData: []byte{},
BaseFeePerGas: bytes.Repeat([]byte{0x05}, 32),
BlockHash: blockHash,
Transactions: [][]byte{},
Withdrawals: withdrawals,
BlobGasUsed: 0,
ExcessBlobGas: 0,
}
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: parentHash,
ParentBlockRoot: bytes.Repeat([]byte{0xDD}, 32),
BlockHash: blockHash,
PrevRandao: randao,
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
FeeRecipient: bytes.Repeat([]byte{0xEE}, 20),
}
header := &ethpb.BeaconBlockHeader{
Slot: slot,
ParentRoot: bytes.Repeat([]byte{0x11}, 32),
StateRoot: bytes.Repeat([]byte{0x22}, 32),
BodyRoot: bytes.Repeat([]byte{0x33}, 32),
}
headerRoot, err := header.HashTreeRoot()
require.NoError(t, err)
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: slot,
BuilderIndex: builderIdx,
BeaconBlockRoot: headerRoot[:],
Payload: payload,
ExecutionRequests: &enginev1.ExecutionRequests{},
}
if mutate != nil {
mutate(payload, bid, envelope)
}
genesisRoot := bytes.Repeat([]byte{0xAB}, 32)
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = bytes.Repeat([]byte{0x44}, 32)
stateRoots[i] = bytes.Repeat([]byte{0x55}, 32)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = randao
}
withdrawalCreds := make([]byte, 32)
withdrawalCreds[0] = cfg.ETH1AddressWithdrawalPrefixByte
eth1Data := &ethpb.Eth1Data{
DepositRoot: bytes.Repeat([]byte{0x66}, 32),
DepositCount: 0,
BlockHash: bytes.Repeat([]byte{0x77}, 32),
}
vals := []*ethpb.Validator{
{
PublicKey: pk,
WithdrawalCredentials: withdrawalCreds,
EffectiveBalance: cfg.MinActivationBalance + 1_000,
},
}
balances := []uint64{cfg.MinActivationBalance + 1_000}
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
for i := range payments {
payments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
executionPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
builders := make([]*ethpb.Builder, builderIdx+1)
builders[builderIdx] = &ethpb.Builder{
Pubkey: pk,
Version: []byte{0},
ExecutionAddress: bytes.Repeat([]byte{0x09}, 20),
Balance: 0,
DepositEpoch: 0,
WithdrawableEpoch: 0,
}
genesisTime := uint64(0)
slotSeconds := cfg.SecondsPerSlot * uint64(slot)
if payload.Timestamp > slotSeconds {
genesisTime = payload.Timestamp - slotSeconds
}
stProto := &ethpb.BeaconStateGloas{
Slot: slot,
GenesisTime: genesisTime,
GenesisValidatorsRoot: genesisRoot,
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
Epoch: 0,
},
LatestBlockHeader: header,
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
Eth1Data: eth1Data,
Validators: vals,
Balances: balances,
LatestBlockHash: payload.ParentHash,
LatestExecutionPayloadBid: bid,
BuilderPendingPayments: payments,
ExecutionPayloadAvailability: executionPayloadAvailability,
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
PayloadExpectedWithdrawals: payload.Withdrawals,
Builders: builders,
}
st, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
expected := st.Copy()
ctx := context.Background()
require.NoError(t, processExecutionRequests(ctx, expected, envelope.ExecutionRequests))
require.NoError(t, expected.QueueBuilderPayment())
require.NoError(t, expected.SetExecutionPayloadAvailability(slot, true))
var blockHashArr [32]byte
copy(blockHashArr[:], payload.BlockHash)
require.NoError(t, expected.SetLatestBlockHash(blockHashArr))
expectedRoot, err := expected.HashTreeRoot(ctx)
require.NoError(t, err)
envelope.StateRoot = expectedRoot[:]
epoch := slots.ToEpoch(slot)
domain, err := signing.Domain(st.Fork(), epoch, cfg.DomainBeaconBuilder, st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(envelope, domain)
require.NoError(t, err)
signature := sk.Sign(signingRoot[:]).Marshal()
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: envelope,
Signature: signature,
}
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
return payloadFixture{
state: st,
signed: signed,
signedProto: signedProto,
envelope: envelope,
payload: payload,
slot: slot,
}
}
func TestProcessExecutionPayload_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
require.NoError(t, ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
var expectedHash [32]byte
copy(expectedHash[:], fixture.payload.BlockHash)
require.Equal(t, expectedHash, latestHash)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
payments, err := fixture.state.BuilderPendingPayments()
require.NoError(t, err)
payment := payments[paymentIndex]
require.NotNil(t, payment)
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
})
err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
require.ErrorContains(t, "prev randao", err)
}
func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
require.NoError(t, fixture.state.QueueBuilderPayment())
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
payments, err := fixture.state.BuilderPendingPayments()
require.NoError(t, err)
payment := payments[paymentIndex]
require.NotNil(t, payment)
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
t.Run("self build", func(t *testing.T) {
proposerSk, err := bls.RandKey()
require.NoError(t, err)
proposerPk := proposerSk.PublicKey().Marshal()
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
stPb.Validators[0].PublicKey = proposerPk
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
require.NoError(t, err)
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
epoch := slots.ToEpoch(msg.Slot)
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(msg, domain)
require.NoError(t, err)
signature := proposerSk.Sign(signingRoot[:]).Marshal()
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: msg,
Signature: signature,
}
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
})
t.Run("builder", func(t *testing.T) {
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
require.NoError(t, err)
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
})
t.Run("invalid signature", func(t *testing.T) {
t.Run("self build", func(t *testing.T) {
proposerSk, err := bls.RandKey()
require.NoError(t, err)
proposerPk := proposerSk.PublicKey().Marshal()
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
stPb.Validators[0].PublicKey = proposerPk
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
require.NoError(t, err)
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: msg,
Signature: bytes.Repeat([]byte{0xFF}, 96),
}
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
t.Run("builder", func(t *testing.T) {
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: fixture.signedProto.Message,
Signature: bytes.Repeat([]byte{0xFF}, 96),
}
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
})
}

View File

@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),

View File

@@ -66,10 +66,6 @@ type ReadOnlyDatabase interface {
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
// Execution payload envelope operations (Gloas+).
ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error)
HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool
// P2P Metadata operations.
MetadataSeqNum(ctx context.Context) (uint64, error)
}
@@ -119,10 +115,6 @@ type NoHeadAccessDatabase interface {
SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error
SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
// Execution payload envelope operations (Gloas+).
SaveExecutionPayloadEnvelope(ctx context.Context, envelope *ethpb.SignedExecutionPayloadEnvelope) error
DeleteExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)

View File

@@ -13,7 +13,6 @@ go_library(
"encoding.go",
"error.go",
"execution_chain.go",
"execution_payload_envelope.go",
"finalized_block_roots.go",
"genesis.go",
"key.go",
@@ -97,7 +96,6 @@ go_test(
"deposit_contract_test.go",
"encoding_test.go",
"execution_chain_test.go",
"execution_payload_envelope_test.go",
"finalized_block_roots_test.go",
"genesis_test.go",
"init_test.go",

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -34,9 +33,6 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
_, blockRoot = bkt.Cursor().Last()
if len(blockRoot) > 0 {
blockRoot = slices.Clone(blockRoot)
}
return nil
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.
@@ -55,9 +51,6 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32
if err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSlotIndicesBucket)
blockRoot = bucket.Get(bytesutil.SlotToBytesBigEndian(slot))
if len(blockRoot) > 0 {
blockRoot = slices.Clone(blockRoot)
}
return nil
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.

View File

@@ -517,10 +517,6 @@ func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot p
return errors.Wrap(err, "could not delete validators")
}
// TODO: execution payload envelopes (Gloas+) are keyed by execution payload
// block hash, not beacon block root, so they cannot be pruned in this loop.
// A separate pruning mechanism is needed (e.g. secondary index or cursor scan).
numSlotsDeleted++
}
@@ -816,10 +812,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
stored := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
if len(stored) > 0 {
addr = slices.Clone(stored)
}
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
// If the fee recipient is not found in the standard fee recipient bucket, then
// check the registration bucket. The fee recipient may be there.
// This is to resolve incompatibility until we fully migrate to the registration bucket.
@@ -833,7 +826,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
if err := decode(ctx, enc, reg); err != nil {
return err
}
addr = slices.Clone(reg.FeeRecipient)
addr = reg.FeeRecipient
}
return nil
})
@@ -1254,12 +1247,6 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(fuluBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Fulu block")
}
case hasGloasKey(enc):
// post Gloas we save the full beacon block as EIP-7732 separates beacon block and payload
rawBlock = &ethpb.SignedBeaconBlockGloas{}
if err := rawBlock.UnmarshalSSZ(enc[len(gloasKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Gloas block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -1290,11 +1277,6 @@ func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
v := blk.Version()
if v >= version.Gloas {
// Gloas blocks are never blinded (no execution payload in block body).
return gloasKey, nil
}
if v >= version.Fulu {
if blk.IsBlinded() {
return fuluBlindKey, nil

View File

@@ -151,17 +151,6 @@ var blockTests = []struct {
}
return blocks.NewSignedBeaconBlock(b)
}},
{
name: "gloas",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockGloas()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
}
return blocks.NewSignedBeaconBlock(b)
},
},
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
@@ -222,7 +211,7 @@ func TestStore_BlocksCRUD(t *testing.T) {
retrievedBlock, err = db.Block(ctx, blockRoot)
require.NoError(t, err)
wanted := retrievedBlock
if retrievedBlock.Version() >= version.Bellatrix && retrievedBlock.Version() < version.Gloas {
if retrievedBlock.Version() >= version.Bellatrix {
wanted, err = retrievedBlock.ToBlinded()
require.NoError(t, err)
}
@@ -654,7 +643,7 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
require.NoError(t, err)
wanted := blk
if blk.Version() >= version.Bellatrix && blk.Version() < version.Gloas {
if blk.Version() >= version.Bellatrix {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
@@ -1025,7 +1014,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
b, err := db.Block(ctx, root)
require.NoError(t, err)
wanted := block1
if block1.Version() >= version.Bellatrix && block1.Version() < version.Gloas {
if block1.Version() >= version.Bellatrix {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
@@ -1043,7 +1032,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
b, err = db.Block(ctx, root)
require.NoError(t, err)
wanted2 := block2
if block2.Version() >= version.Bellatrix && block2.Version() < version.Gloas {
if block2.Version() >= version.Bellatrix {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
@@ -1061,7 +1050,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
b, err = db.Block(ctx, root)
require.NoError(t, err)
wanted = block3
if block3.Version() >= version.Bellatrix && block3.Version() < version.Gloas {
if block3.Version() >= version.Bellatrix {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
@@ -1097,7 +1086,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
b, err := db.Block(ctx, root)
require.NoError(t, err)
wanted := block1
if block1.Version() >= version.Bellatrix && block1.Version() < version.Gloas {
if block1.Version() >= version.Bellatrix {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
@@ -1114,7 +1103,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
b, err = db.Block(ctx, root)
require.NoError(t, err)
wanted = genesisBlock
if genesisBlock.Version() >= version.Bellatrix && genesisBlock.Version() < version.Gloas {
if genesisBlock.Version() >= version.Bellatrix {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
@@ -1131,7 +1120,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
b, err = db.Block(ctx, root)
require.NoError(t, err)
wanted = genesisBlock
if genesisBlock.Version() >= version.Bellatrix && genesisBlock.Version() < version.Gloas {
if genesisBlock.Version() >= version.Bellatrix {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
@@ -1227,7 +1216,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
require.NoError(t, err)
wanted := b1
if b1.Version() >= version.Bellatrix && b1.Version() < version.Gloas {
if b1.Version() >= version.Bellatrix {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
@@ -1243,7 +1232,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
}
wanted = b2
if b2.Version() >= version.Bellatrix && b2.Version() < version.Gloas {
if b2.Version() >= version.Bellatrix {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
@@ -1253,7 +1242,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(wantedPb, retrieved0Pb), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
wanted = b3
if b3.Version() >= version.Bellatrix && b3.Version() < version.Gloas {
if b3.Version() >= version.Bellatrix {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}

View File

@@ -3,7 +3,6 @@ package kv
import (
"context"
"fmt"
"slices"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/ethereum/go-ethereum/common"
@@ -18,10 +17,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
var addr []byte
if err := s.db.View(func(tx *bolt.Tx) error {
chainInfo := tx.Bucket(chainMetadataBucket)
stored := chainInfo.Get(depositContractAddressKey)
if len(stored) > 0 {
addr = slices.Clone(stored)
}
addr = chainInfo.Get(depositContractAddressKey)
return nil
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.

View File

@@ -1,123 +0,0 @@
package kv
import (
"context"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/golang/snappy"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)
// SaveExecutionPayloadEnvelope blinds and saves a signed execution payload envelope keyed by
// beacon block root. The envelope is stored in blinded form: the full execution payload is replaced
// with its block hash. The full payload can later be retrieved from the EL via
// engine_getPayloadBodiesByHash.
func (s *Store) SaveExecutionPayloadEnvelope(ctx context.Context, env *ethpb.SignedExecutionPayloadEnvelope) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveExecutionPayloadEnvelope")
defer span.End()
if env == nil || env.Message == nil || env.Message.Payload == nil {
return errors.New("cannot save nil execution payload envelope")
}
blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
blinded := blindEnvelope(env)
enc, err := encodeBlindedEnvelope(blinded)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
return bkt.Put(blockRoot[:], enc)
})
}
// ExecutionPayloadEnvelope retrieves the blinded signed execution payload envelope by beacon block root.
func (s *Store) ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.ExecutionPayloadEnvelope")
defer span.End()
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
enc = bkt.Get(blockRoot[:])
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, errors.Wrap(ErrNotFound, "execution payload envelope not found")
}
return decodeBlindedEnvelope(enc)
}
// HasExecutionPayloadEnvelope checks whether an execution payload envelope exists for the given beacon block root.
func (s *Store) HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.HasExecutionPayloadEnvelope")
defer span.End()
var exists bool
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
exists = bkt.Get(blockRoot[:]) != nil
return nil
}); err != nil {
return false
}
return exists
}
// DeleteExecutionPayloadEnvelope removes a signed execution payload envelope by beacon block root.
func (s *Store) DeleteExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteExecutionPayloadEnvelope")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
return bkt.Delete(blockRoot[:])
})
}
// blindEnvelope converts a full signed envelope to its blinded form by replacing
// the execution payload with its block hash. This avoids computing the expensive
// payload hash tree root on the critical path.
func blindEnvelope(env *ethpb.SignedExecutionPayloadEnvelope) *ethpb.SignedBlindedExecutionPayloadEnvelope {
return &ethpb.SignedBlindedExecutionPayloadEnvelope{
Message: &ethpb.BlindedExecutionPayloadEnvelope{
BlockHash: env.Message.Payload.BlockHash,
ExecutionRequests: env.Message.ExecutionRequests,
BuilderIndex: env.Message.BuilderIndex,
BeaconBlockRoot: env.Message.BeaconBlockRoot,
Slot: env.Message.Slot,
StateRoot: env.Message.StateRoot,
},
Signature: env.Signature,
}
}
// encodeBlindedEnvelope SSZ-encodes and snappy-compresses a blinded envelope for storage.
func encodeBlindedEnvelope(env *ethpb.SignedBlindedExecutionPayloadEnvelope) ([]byte, error) {
sszBytes, err := env.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded envelope")
}
return snappy.Encode(nil, sszBytes), nil
}
// decodeBlindedEnvelope snappy-decompresses and SSZ-decodes a blinded envelope from storage.
func decodeBlindedEnvelope(enc []byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error) {
dec, err := snappy.Decode(nil, enc)
if err != nil {
return nil, errors.Wrap(err, "could not snappy decode envelope")
}
blinded := &ethpb.SignedBlindedExecutionPayloadEnvelope{}
if err := blinded.UnmarshalSSZ(dec); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded envelope")
}
return blinded, nil
}

View File

@@ -1,124 +0,0 @@
package kv
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func testEnvelope(t *testing.T) *ethpb.SignedExecutionPayloadEnvelope {
t.Helper()
return &ethpb.SignedExecutionPayloadEnvelope{
Message: &ethpb.ExecutionPayloadEnvelope{
Payload: &enginev1.ExecutionPayloadDeneb{
ParentHash: bytesutil.PadTo([]byte("parent"), 32),
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
ReceiptsRoot: bytesutil.PadTo([]byte("receipts"), 32),
LogsBloom: bytesutil.PadTo([]byte{}, 256),
PrevRandao: bytesutil.PadTo([]byte("randao"), 32),
BlockNumber: 100,
GasLimit: 30000000,
GasUsed: 21000,
Timestamp: 1000,
ExtraData: []byte("extra"),
BaseFeePerGas: bytesutil.PadTo([]byte{1}, 32),
BlockHash: bytesutil.PadTo([]byte("blockhash"), 32),
Transactions: [][]byte{[]byte("tx1"), []byte("tx2")},
Withdrawals: []*enginev1.Withdrawal{{Index: 1, ValidatorIndex: 2, Address: bytesutil.PadTo([]byte("addr"), 20), Amount: 100}},
BlobGasUsed: 131072,
ExcessBlobGas: 0,
},
ExecutionRequests: &enginev1.ExecutionRequests{},
BuilderIndex: primitives.BuilderIndex(42),
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconroot"), 32),
Slot: primitives.Slot(99),
StateRoot: bytesutil.PadTo([]byte("envelopestateroot"), 32),
},
Signature: bytesutil.PadTo([]byte("sig"), 96),
}
}
func TestStore_SaveAndRetrieveExecutionPayloadEnvelope(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
env := testEnvelope(t)
// Keyed by beacon block root.
blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
// Initially should not exist.
assert.Equal(t, false, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
// Save (always blinds internally).
require.NoError(t, db.SaveExecutionPayloadEnvelope(ctx, env))
// Should exist now.
assert.Equal(t, true, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
// Load and verify it's blinded.
loaded, err := db.ExecutionPayloadEnvelope(ctx, blockRoot)
require.NoError(t, err)
// Verify metadata is preserved.
assert.Equal(t, env.Message.Slot, loaded.Message.Slot)
assert.Equal(t, env.Message.BuilderIndex, loaded.Message.BuilderIndex)
assert.DeepEqual(t, env.Message.BeaconBlockRoot, loaded.Message.BeaconBlockRoot)
assert.DeepEqual(t, env.Message.StateRoot, loaded.Message.StateRoot)
assert.DeepEqual(t, env.Signature, loaded.Signature)
// BlockHash should be the payload's block hash (not a hash tree root).
assert.DeepEqual(t, env.Message.Payload.BlockHash, loaded.Message.BlockHash)
}
func TestStore_DeleteExecutionPayloadEnvelope(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
env := testEnvelope(t)
blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
require.NoError(t, db.SaveExecutionPayloadEnvelope(ctx, env))
assert.Equal(t, true, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
require.NoError(t, db.DeleteExecutionPayloadEnvelope(ctx, blockRoot))
assert.Equal(t, false, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
}
func TestStore_ExecutionPayloadEnvelope_NotFound(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
nonExistent := bytesutil.ToBytes32([]byte("nonexistent"))
_, err := db.ExecutionPayloadEnvelope(ctx, nonExistent)
require.ErrorContains(t, "not found", err)
}
func TestStore_SaveExecutionPayloadEnvelope_NilRejected(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
err := db.SaveExecutionPayloadEnvelope(ctx, nil)
require.ErrorContains(t, "nil", err)
}
func TestBlindEnvelope_PreservesBlockHash(t *testing.T) {
env := testEnvelope(t)
blinded := blindEnvelope(env)
// Should contain the block hash from the payload, not a hash tree root.
assert.DeepEqual(t, env.Message.Payload.BlockHash, blinded.Message.BlockHash)
// Metadata should be preserved.
assert.Equal(t, env.Message.BuilderIndex, blinded.Message.BuilderIndex)
assert.Equal(t, env.Message.Slot, blinded.Message.Slot)
assert.DeepEqual(t, env.Message.BeaconBlockRoot, blinded.Message.BeaconBlockRoot)
assert.DeepEqual(t, env.Message.StateRoot, blinded.Message.StateRoot)
assert.DeepEqual(t, env.Signature, blinded.Signature)
}

View File

@@ -87,10 +87,3 @@ func hasFuluBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(fuluBlindKey)], fuluBlindKey)
}
func hasGloasKey(enc []byte) bool {
if len(gloasKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(gloasKey)], gloasKey)
}

View File

@@ -126,7 +126,6 @@ var Buckets = [][]byte{
feeRecipientBucket,
registrationBucket,
custodyBucket,
executionPayloadEnvelopesBucket,
}
// KVStoreOption is a functional option that modifies a kv.Store.

View File

@@ -199,7 +199,7 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {
var keys [][]byte
if err := stateBucket.ForEach(func(pubKey, v []byte) error {
keys = append(keys, bytes.Clone(pubKey))
keys = append(keys, pubKey)
return nil
}); err != nil {
return nil, err

View File

@@ -7,17 +7,16 @@ package kv
// it easy to scan for keys that have a certain shard number as a prefix and return those
// corresponding attestations.
var (
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
stateDiffBucket = []byte("state-diff")
executionPayloadEnvelopesBucket = []byte("execution-payload-envelopes")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
chainMetadataBucket = []byte("chain-metadata")
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
registrationBucket = []byte("registration")
stateDiffBucket = []byte("state-diff")
// Light Client Updates Bucket
lightClientUpdatesBucket = []byte("light-client-updates")
@@ -61,8 +60,6 @@ var (
electraBlindKey = []byte("blind-electra")
fuluKey = []byte("fulu")
fuluBlindKey = []byte("blind-fulu")
gloasKey = []byte("gloas")
// No gloasBlindKey needed - Gloas blocks are never blinded (no execution payload in block body).
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
@@ -188,23 +187,20 @@ func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
return bolt.ErrBucketNotFound
}
buf := append(key, stateSuffix...)
rawStateDiff := bucket.Get(buf)
if len(rawStateDiff) == 0 {
stateDiff = bucket.Get(buf)
if stateDiff == nil {
return errors.New("state diff not found")
}
stateDiff = slices.Clone(rawStateDiff)
buf = append(key, validatorSuffix...)
rawValidatorDiff := bucket.Get(buf)
if len(rawValidatorDiff) == 0 {
validatorDiff = bucket.Get(buf)
if validatorDiff == nil {
return errors.New("validator diff not found")
}
validatorDiff = slices.Clone(rawValidatorDiff)
buf = append(key, balancesSuffix...)
rawBalancesDiff := bucket.Get(buf)
if len(rawBalancesDiff) == 0 {
balancesDiff = bucket.Get(buf)
if balancesDiff == nil {
return errors.New("balances diff not found")
}
balancesDiff = slices.Clone(rawBalancesDiff)
return nil
})
@@ -228,11 +224,10 @@ func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
if bucket == nil {
return bolt.ErrBucketNotFound
}
rawEnc := bucket.Get(key)
if rawEnc == nil {
enc = bucket.Get(key)
if enc == nil {
return errors.New("state not found")
}
enc = slices.Clone(rawEnc)
return nil
})

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -48,11 +47,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
}
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
rawEnc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
if len(rawEnc) == 0 {
return nil
}
enc = slices.Clone(rawEnc)
enc = tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
return nil
}); err != nil {
return nil, err

View File

@@ -8,7 +8,6 @@ go_library(
"deposit.go",
"engine_client.go",
"errors.go",
"graffiti_info.go",
"log.go",
"log_processing.go",
"metrics.go",
@@ -90,7 +89,6 @@ go_test(
"engine_client_fuzz_test.go",
"engine_client_test.go",
"execution_chain_test.go",
"graffiti_info_test.go",
"init_test.go",
"log_processing_test.go",
"mock_test.go",

View File

@@ -61,17 +61,7 @@ var (
}
)
// ClientVersionV1 represents the response from engine_getClientVersionV1.
type ClientVersionV1 struct {
Code string `json:"code"`
Name string `json:"name"`
Version string `json:"version"`
Commit string `json:"commit"`
}
const (
// GetClientVersionMethod is the engine_getClientVersionV1 method for JSON-RPC.
GetClientVersionMethod = "engine_getClientVersionV1"
// NewPayloadMethod v1 request string for JSON-RPC.
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
@@ -360,24 +350,6 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
return elSupportedEndpointsSlice, nil
}
// GetClientVersion calls engine_getClientVersionV1 to retrieve EL client information.
func (s *Service) GetClientVersion(ctx context.Context) ([]ClientVersionV1, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetClientVersion")
defer span.End()
// Per spec, we send our own client info as the parameter
clVersion := ClientVersionV1{
Code: CLCode,
Name: Name,
Version: version.SemanticVersion(),
Commit: version.GetCommitPrefix(),
}
var result []ClientVersionV1
err := s.rpcClient.CallContext(ctx, &result, GetClientVersionMethod, clVersion)
return result, handleRPCError(err)
}
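// For illustration only: the shape of the data behind GetClientVersion above. The
// CL sends its own identity as the single parameter and, per the
// engine_getClientVersionV1 Engine API method, receives one entry per connected
// execution client. All field values below are invented.
var (
	exampleParam  = ClientVersionV1{Code: "PR", Name: "Prysm", Version: "v7.0.0", Commit: "e4f6aabb"}
	exampleResult = []ClientVersionV1{{Code: "GE", Name: "Geth", Version: "1.14.0", Commit: "abcd1234"}}
)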
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
//
// Spec code:

View File

@@ -1,134 +0,0 @@
package execution
import (
"strings"
"sync"
"github.com/OffchainLabs/prysm/v7/runtime/version"
)
const (
// CLCode is the two-letter client code for Prysm.
CLCode = "PR"
Name = "Prysm"
)
// GraffitiInfo holds version information for generating block graffiti.
// It is thread-safe and can be updated by the execution service and read by the validator server.
type GraffitiInfo struct {
mu sync.RWMutex
elCode string // From engine_getClientVersionV1
elCommit string // From engine_getClientVersionV1
logOnce sync.Once
}
// NewGraffitiInfo creates a new GraffitiInfo.
func NewGraffitiInfo() *GraffitiInfo {
return &GraffitiInfo{}
}
// UpdateFromEngine updates the EL client information.
func (g *GraffitiInfo) UpdateFromEngine(code, commit string) {
g.mu.Lock()
defer g.mu.Unlock()
g.elCode = code
g.elCommit = strings.TrimPrefix(commit, "0x")
}
// GenerateGraffiti generates graffiti using the flexible standard
// with the provided user graffiti from the validator client request.
// It places user graffiti first, then appends as much client info as space allows.
//
// A space separator is added between user graffiti and client info when it
// fits without reducing the client version tier.
//
// Available Space | Format
// ≥13 bytes | user + space + EL(2)+commit(4)+CL(2)+commit(4) e.g. "Sushi GEabcdPRe4f6"
// 12 bytes | user + EL(2)+commit(4)+CL(2)+commit(4) e.g. "12345678901234567890GEabcdPRe4f6"
// 9-11 bytes | user + space + EL(2)+commit(2)+CL(2)+commit(2) e.g. "12345678901234567890123 GEabPRe4"
// 8 bytes | user + EL(2)+commit(2)+CL(2)+commit(2) e.g. "123456789012345678901234GEabPRe4"
// 5-7 bytes | user + space + EL(2)+CL(2) e.g. "123456789012345678901234567 GEPR"
// 4 bytes | user + EL(2)+CL(2) e.g. "1234567890123456789012345678GEPR"
// 3 bytes | user + space + code(2) e.g. "12345678901234567890123456789 GE"
// 2 bytes | user + code(2) e.g. "123456789012345678901234567890GE"
// <2 bytes | user only e.g. "1234567890123456789012345678901x"
func (g *GraffitiInfo) GenerateGraffiti(userGraffiti []byte) [32]byte {
g.mu.RLock()
defer g.mu.RUnlock()
var result [32]byte
userStr := string(userGraffiti)
// Trim trailing null bytes
for len(userStr) > 0 && userStr[len(userStr)-1] == 0 {
userStr = userStr[:len(userStr)-1]
}
available := 32 - len(userStr)
clCommit := version.GetCommitPrefix()
clCommit4 := truncateCommit(clCommit, 4)
clCommit2 := truncateCommit(clCommit, 2)
// If no EL info, clear EL commits but still include CL info
var elCommit4, elCommit2 string
if g.elCode != "" {
elCommit4 = truncateCommit(g.elCommit, 4)
elCommit2 = truncateCommit(g.elCommit, 2)
}
// Add a space separator between user graffiti and client info,
// but only if it won't reduce the space available for client version info.
space := func(minForTier int) string {
if len(userStr) > 0 && available >= minForTier+1 {
return " "
}
return ""
}
var graffiti string
switch {
case available >= 12:
// Full: user+EL(2)+commit(4)+CL(2)+commit(4)
graffiti = userStr + space(12) + g.elCode + elCommit4 + CLCode + clCommit4
case available >= 8:
// Reduced commits: user+EL(2)+commit(2)+CL(2)+commit(2)
graffiti = userStr + space(8) + g.elCode + elCommit2 + CLCode + clCommit2
case available >= 4:
// Codes only: user+EL(2)+CL(2)
graffiti = userStr + space(4) + g.elCode + CLCode
case available >= 2:
// Single code: user+code(2)
if g.elCode != "" {
graffiti = userStr + space(2) + g.elCode
} else {
graffiti = userStr + space(2) + CLCode
}
default:
// User graffiti only
graffiti = userStr
}
g.logOnce.Do(func() {
logGraffitiInfo(graffiti, available)
})
copy(result[:], graffiti)
return result
}
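// graffitiExample walks one case of the tiers above using the helpers in this
// file. The EL code and commit come from the package tests ("GE", "abcd1234");
// the CL commit suffix depends on the running binary and is shown as xxxx.
func graffitiExample() string {
	g := NewGraffitiInfo()
	g.UpdateFromEngine("GE", "abcd1234")
	out := g.GenerateGraffiti([]byte("Bob")) // 3 user bytes leave 29 free: full tier plus a space
	// Result reads "Bob GEabcdPRxxxx", where xxxx is the first four characters of
	// this node's own commit. Longer user strings fall through the tiers:
	//   24 user bytes -> 8 free -> EL(2)+commit(2)+CL(2)+commit(2)
	//   28 user bytes -> 4 free -> codes only ("GEPR")
	//   30 user bytes -> 2 free -> EL code only ("GE")
	return strings.TrimRight(string(out[:]), "\x00")
}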
// logGraffitiInfo logs the graffiti that will be used.
func logGraffitiInfo(graffiti string, available int) {
if available >= 2 {
log.WithField("graffiti", graffiti).Info("Graffiti includes client version info appended after user graffiti")
return
}
log.WithField("graffiti", graffiti).Info("Prysm adds consensus and execution debugging information to the end of the graffiti field when possible. To prevent deletion of debugging info, please consider using a shorter graffiti")
}
// truncateCommit returns the first n characters of the commit string.
func truncateCommit(commit string, n int) string {
if len(commit) <= n {
return commit
}
return commit[:n]
}

View File

@@ -1,250 +0,0 @@
package execution
import (
"testing"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
tests := []struct {
name string
elCode string
elCommit string
userGraffiti []byte
wantPrefix string // user graffiti appears first
wantSuffix string // client version info appended after
}{
// No EL info cases (CL info "PR" + commit still included when space allows)
{
name: "No EL - empty user graffiti",
elCode: "",
elCommit: "",
userGraffiti: []byte{},
wantPrefix: "PR", // Only CL code + commit (no user graffiti to prefix)
},
{
name: "No EL - short user graffiti",
elCode: "",
elCommit: "",
userGraffiti: []byte("my validator"),
wantPrefix: "my validator",
wantSuffix: " PR", // space + CL code appended
},
{
name: "No EL - 28 char user graffiti (4 bytes available)",
elCode: "",
elCommit: "",
userGraffiti: []byte("1234567890123456789012345678"), // 28 chars, 4 bytes available = codes only
wantPrefix: "1234567890123456789012345678",
wantSuffix: "PR", // CL code (no EL, so just PR)
},
{
name: "No EL - 30 char user graffiti (2 bytes available)",
elCode: "",
elCommit: "",
userGraffiti: []byte("123456789012345678901234567890"), // 30 chars, 2 bytes available = fits PR
wantPrefix: "123456789012345678901234567890",
wantSuffix: "PR",
},
{
name: "No EL - 31 char user graffiti (1 byte available)",
elCode: "",
elCommit: "",
userGraffiti: []byte("1234567890123456789012345678901"), // 31 chars, 1 byte available = not enough for code
wantPrefix: "1234567890123456789012345678901", // User only
},
{
name: "No EL - 32 char user graffiti (0 bytes available)",
elCode: "",
elCommit: "",
userGraffiti: []byte("12345678901234567890123456789012"),
wantPrefix: "12345678901234567890123456789012", // User only
},
// With EL info - flexible standard format cases
{
name: "With EL - full format (empty user graffiti)",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte{},
wantPrefix: "GEabcdPR", // No user graffiti, starts with client info
},
{
name: "With EL - full format (short user graffiti)",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("Bob"),
wantPrefix: "Bob",
wantSuffix: " GEabcdPR", // space + EL(2)+commit(4)+CL(2)+commit(4)
},
{
name: "With EL - full format (20 char user, 12 bytes available) - no space, would reduce tier",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("12345678901234567890"), // 20 chars, leaves exactly 12 bytes = full format, no room for space
wantPrefix: "12345678901234567890",
wantSuffix: "GEabcdPR",
},
{
name: "With EL - full format (19 char user, 13 bytes available) - space fits",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("1234567890123456789"), // 19 chars, leaves 13 bytes = full format + space
wantPrefix: "1234567890123456789",
wantSuffix: " GEabcdPR",
},
{
name: "With EL - reduced commits (24 char user, 8 bytes available) - no space, would reduce tier",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("123456789012345678901234"), // 24 chars, leaves exactly 8 bytes = reduced format, no room for space
wantPrefix: "123456789012345678901234",
wantSuffix: "GEabPR",
},
{
name: "With EL - reduced commits (23 char user, 9 bytes available) - space fits",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("12345678901234567890123"), // 23 chars, leaves 9 bytes = reduced format + space
wantPrefix: "12345678901234567890123",
wantSuffix: " GEabPR",
},
{
name: "With EL - codes only (28 char user, 4 bytes available) - no space, would reduce tier",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("1234567890123456789012345678"), // 28 chars, leaves exactly 4 bytes = codes only, no room for space
wantPrefix: "1234567890123456789012345678",
wantSuffix: "GEPR",
},
{
name: "With EL - codes only (27 char user, 5 bytes available) - space fits",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("123456789012345678901234567"), // 27 chars, leaves 5 bytes = codes only + space
wantPrefix: "123456789012345678901234567",
wantSuffix: " GEPR",
},
{
name: "With EL - EL code only (30 char user, 2 bytes available) - no space, would reduce tier",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("123456789012345678901234567890"), // 30 chars, leaves exactly 2 bytes = EL code only, no room for space
wantPrefix: "123456789012345678901234567890",
wantSuffix: "GE",
},
{
name: "With EL - EL code only (29 char user, 3 bytes available) - space fits",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("12345678901234567890123456789"), // 29 chars, leaves 3 bytes = EL code + space
wantPrefix: "12345678901234567890123456789",
wantSuffix: " GE",
},
{
name: "With EL - user only (31 char user, 1 byte available)",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("1234567890123456789012345678901"), // 31 chars, leaves 1 byte = not enough for code
wantPrefix: "1234567890123456789012345678901", // User only
},
{
name: "With EL - user only (32 char user, 0 bytes available)",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: []byte("12345678901234567890123456789012"),
wantPrefix: "12345678901234567890123456789012",
},
// Null byte handling
{
name: "Null bytes - input with trailing nulls",
elCode: "GE",
elCommit: "abcd1234",
userGraffiti: append([]byte("test"), 0, 0, 0),
wantPrefix: "test",
wantSuffix: " GEabcdPR",
},
// 0x prefix handling - some ELs return 0x-prefixed commits
{
name: "0x prefix - stripped from EL commit",
elCode: "GE",
elCommit: "0xabcd1234",
userGraffiti: []byte{},
wantPrefix: "GEabcdPR",
},
{
name: "No 0x prefix - commit used as-is",
elCode: "NM",
elCommit: "abcd1234",
userGraffiti: []byte{},
wantPrefix: "NMabcdPR",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewGraffitiInfo()
if tt.elCode != "" {
g.UpdateFromEngine(tt.elCode, tt.elCommit)
}
result := g.GenerateGraffiti(tt.userGraffiti)
resultStr := string(result[:])
trimmed := trimNullBytes(resultStr)
// Check prefix (user graffiti comes first)
require.Equal(t, true, len(trimmed) >= len(tt.wantPrefix), "Result too short for prefix check")
require.Equal(t, tt.wantPrefix, trimmed[:len(tt.wantPrefix)], "Prefix mismatch")
// Check suffix if specified (client version info appended)
if tt.wantSuffix != "" {
require.Equal(t, true, len(trimmed) >= len(tt.wantSuffix), "Result too short for suffix check")
// The suffix should appear somewhere after the prefix
afterPrefix := trimmed[len(tt.wantPrefix):]
require.Equal(t, true, len(afterPrefix) >= len(tt.wantSuffix), "Not enough room for suffix after prefix")
require.Equal(t, tt.wantSuffix, afterPrefix[:len(tt.wantSuffix)], "Suffix mismatch")
}
})
}
}
func TestGraffitiInfo_UpdateFromEngine(t *testing.T) {
g := NewGraffitiInfo()
// Initially no EL info - should still have CL info (PR + commit)
result := g.GenerateGraffiti([]byte{})
resultStr := trimNullBytes(string(result[:]))
require.Equal(t, "PR", resultStr[:2], "Expected CL info before update")
// Update with EL info
g.UpdateFromEngine("GE", "1234abcd")
result = g.GenerateGraffiti([]byte{})
resultStr = trimNullBytes(string(result[:]))
require.Equal(t, "GE1234PR", resultStr[:8], "Expected EL+CL info after update")
}
func TestTruncateCommit(t *testing.T) {
tests := []struct {
commit string
n int
want string
}{
{"abcd1234", 4, "abcd"},
{"ab", 4, "ab"},
{"", 4, ""},
{"abcdef", 2, "ab"},
}
for _, tt := range tests {
got := truncateCommit(tt.commit, tt.n)
require.Equal(t, tt.want, got)
}
}
func trimNullBytes(s string) string {
for len(s) > 0 && s[len(s)-1] == 0 {
s = s[:len(s)-1]
}
return s
}

View File

@@ -124,11 +124,3 @@ func WithVerifierWaiter(v *verification.InitializerWaiter) Option {
return nil
}
}
// WithGraffitiInfo sets the GraffitiInfo for client version tracking.
func WithGraffitiInfo(g *GraffitiInfo) Option {
return func(s *Service) error {
s.graffitiInfo = g
return nil
}
}

View File

@@ -162,7 +162,6 @@ type Service struct {
verifierWaiter *verification.InitializerWaiter
blobVerifier verification.NewBlobVerifier
capabilityCache *capabilityCache
graffitiInfo *GraffitiInfo
}
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -319,28 +318,6 @@ func (s *Service) updateConnectedETH1(state bool) {
s.updateBeaconNodeStats()
}
// GraffitiInfo returns the GraffitiInfo struct for graffiti generation.
func (s *Service) GraffitiInfo() *GraffitiInfo {
return s.graffitiInfo
}
// updateGraffitiInfo fetches EL client version and updates the graffiti info.
func (s *Service) updateGraffitiInfo() {
if s.graffitiInfo == nil {
return
}
ctx, cancel := context.WithTimeout(s.ctx, time.Second)
defer cancel()
versions, err := s.GetClientVersion(ctx)
if err != nil {
log.WithError(err).Debug("Could not get execution client version for graffiti")
return
}
if len(versions) >= 1 {
s.graffitiInfo.UpdateFromEngine(versions[0].Code, versions[0].Commit)
}
}
// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followedBlockHeight(ctx context.Context) (uint64, error) {
@@ -621,12 +598,6 @@ func (s *Service) run(done <-chan struct{}) {
chainstartTicker := time.NewTicker(logPeriod)
defer chainstartTicker.Stop()
// Update graffiti info 4 times per epoch (~96 seconds with 12s slots and 32 slots/epoch)
graffitiTicker := time.NewTicker(96 * time.Second)
defer graffitiTicker.Stop()
// Initial update
s.updateGraffitiInfo()
for {
select {
case <-done:
@@ -651,8 +622,6 @@ func (s *Service) run(done <-chan struct{}) {
continue
}
s.logTillChainStart(context.Background())
case <-graffitiTicker.C:
s.updateGraffitiInfo()
}
}
}

View File

@@ -6,7 +6,7 @@ go_library(
"doc.go",
"errors.go",
"forkchoice.go",
"gloas.go",
"last_root.go",
"log.go",
"metrics.go",
"node.go",
@@ -33,7 +33,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
@@ -52,6 +51,7 @@ go_test(
srcs = [
"ffg_update_test.go",
"forkchoice_test.go",
"last_root_test.go",
"no_vote_test.go",
"node_test.go",
"on_tick_test.go",

View File

@@ -31,8 +31,8 @@ func New() *ForkChoice {
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}
@@ -44,7 +44,7 @@ func New() *ForkChoice {
// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
return len(f.store.emptyNodeByRoot)
return len(f.store.nodeByRoot)
}
// Head returns the head root from fork choice store.
@@ -65,14 +65,14 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)
@@ -119,14 +119,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
jc, fc = f.store.pullTips(state, pn.node, jc, fc)
jc, fc = f.store.pullTips(state, node, jc, fc)
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
_, remErr := f.store.removeNode(ctx, pn)
_, remErr := f.store.removeNode(ctx, node)
if remErr != nil {
log.WithError(remErr).Error("Could not remove node")
}
@@ -149,63 +149,49 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
return nil
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root),
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root)}
return f.store.prune(ctx)
}
// HasNode returns true if the node exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
_, ok := f.store.emptyNodeByRoot[root]
_, ok := f.store.nodeByRoot[root]
return ok
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
// It is fine to pick empty node here since we only check if the beacon block is canonical.
pn, ok := f.store.emptyNodeByRoot[root]
if !ok || pn == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
}
if pn.node.bestDescendant == nil {
// The node doesn't have any children
if node.bestDescendant == nil {
if f.store.headNode.bestDescendant == nil {
// headNode is itself head.
return pn.node == f.store.headNode
return node == f.store.headNode
}
// headNode is not actualized and there are some descendants
return pn.node == f.store.headNode.bestDescendant
return node == f.store.headNode.bestDescendant
}
// The node has children
if f.store.headNode.bestDescendant == nil {
return pn.node.bestDescendant == f.store.headNode
return node.bestDescendant == f.store.headNode
}
return pn.node.bestDescendant == f.store.headNode.bestDescendant
return node.bestDescendant == f.store.headNode.bestDescendant
}
// IsOptimistic returns true if the given root has been optimistically synced.
// TODO: Gloas, the current implementation uses the result of the full block for
// the given root. In gloas this would be incorrect and we should specify the
// payload content, thus we should expose a full/empty version of this call.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
if f.store.allTipsAreInvalid {
return true, nil
}
en, ok := f.store.emptyNodeByRoot[root]
if !ok || en == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return true, ErrNilNode
}
fn := f.store.fullNodeByRoot[root]
if fn != nil {
return fn.optimistic, nil
}
return en.optimistic, nil
return node.optimistic, nil
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
@@ -213,21 +199,17 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
defer span.End()
pn, ok := f.store.emptyNodeByRoot[root]
if !ok || pn == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}
n := pn.node
for n.slot > slot {
n := node
for n != nil && n.slot > slot {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if n.parent == nil {
n = nil
break
}
n = n.parent.node
n = n.parent
}
if n == nil {
@@ -240,11 +222,10 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
pn, ok := f.store.emptyNodeByRoot[cp.Root]
if !ok || pn == nil {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
return false, nil
}
node := pn.node
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
@@ -253,13 +234,10 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return false, nil
}
// If it's the start of the epoch, it is a checkpoint
if node.slot == epochStart {
if len(node.children) == 0 {
return true, nil
}
// If there are no descendants of this beacon block, it is viable as a checkpoint
children := f.store.allConsensusChildren(node)
if len(children) == 0 {
if node.slot == epochStart {
return true, nil
}
if !features.Get().IgnoreUnviableAttestations {
@@ -269,8 +247,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return true, nil
}
}
// If some child is after the start of the epoch, the checkpoint is viable.
for _, child := range children {
for _, child := range node.children {
if child.slot > epochStart {
return true, nil
}
@@ -311,7 +288,7 @@ func (f *ForkChoice) updateBalances() error {
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
@@ -320,7 +297,7 @@ func (f *ForkChoice) updateBalances() error {
nextNode.balance += newBalance
}
currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
@@ -361,13 +338,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
// SetOptimisticToValid sets the node with the given root as a fully validated node
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
fn, ok := f.store.fullNodeByRoot[root]
if !ok || fn == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
}
return f.store.setNodeAndParentValidated(ctx, fn)
return node.setNodeAndParentValidated(ctx)
}
// PreviousJustifiedCheckpoint of fork choice store.
@@ -386,8 +363,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
}
// InsertSlashedIndex adds the given slashed validator index to the
@@ -410,7 +387,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
return
}
node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
if !ok || node == nil {
return
}
@@ -445,30 +422,22 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
}
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
// This is payload aware. Consider the following situation
// [A,full] <--- [B, full] <--- [C,pending]
//          \
//           \--------- [B, empty] <-- [D, pending]
//
// Then even though C and D both descend from the beacon block B, their common ancestor is A.
// Note also that this function **requires** the two roots to actually be contending blocks! Otherwise the
// behavior is not defined.
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
defer span.End()
en1, ok := f.store.emptyNodeByRoot[r1]
if !ok || en1 == nil {
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, en1.node.slot, nil
return r1, n1.slot, nil
}
en2, ok := f.store.emptyNodeByRoot[r2]
if !ok || en2 == nil {
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
@@ -476,23 +445,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
if ctx.Err() != nil {
return [32]byte{}, 0, ctx.Err()
}
if en1.node.slot > en2.node.slot {
en1 = en1.node.parent
if n1.slot > n2.slot {
n1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
// This should not happen at runtime as the finalized
// node has to be a common ancestor
if en1 == nil {
if n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
} else {
en2 = en2.node.parent
n2 = n2.parent
// Reaches the end of the tree and unable to find common ancestor.
if en2 == nil {
if n2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
}
if en1 == en2 {
return en1.node.root, en1.node.slot, nil
if n1 == n2 {
return n1.root, n1.slot, nil
}
}
}
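// The walk above, stripped to its essentials: repeatedly step whichever node sits
// at the higher slot up to its parent until the two pointers meet. A simplified
// sketch on a hypothetical node type, ignoring the empty/full payload distinction
// the real store tracks.
type ancestorNode struct {
	slot   uint64
	parent *ancestorNode
}

func commonAncestorSketch(a, b *ancestorNode) *ancestorNode {
	for a != nil && b != nil && a != b {
		if a.slot > b.slot {
			a = a.parent
		} else {
			b = b.parent
		}
	}
	if a == nil || b == nil {
		return nil // one chain ran out first: broken link, no common ancestor
	}
	return a
}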
@@ -539,17 +508,35 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
root := f.FinalizedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
root := f.JustifiedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
root := f.store.unrealizedJustifiedCheckpoint.Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// ForkChoiceDump returns a full dump of forkchoice.
@@ -573,7 +560,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
@@ -602,7 +589,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
@@ -630,11 +617,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.node.slot, nil
return n.slot, nil
}
// DependentRoot returns the last root of the epoch prior to the requested ecoch in the canonical chain.
@@ -642,7 +629,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
}
// DependentRootForEpoch return the last root of the epoch prior to the requested epoch for the given root.
// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
tr, err := f.TargetRootForEpoch(root, epoch)
if err != nil {
@@ -651,18 +638,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
if tr == [32]byte{} {
return [32]byte{}, nil
}
en, ok := f.store.emptyNodeByRoot[tr]
if !ok || en == nil {
node, ok := f.store.nodeByRoot[tr]
if !ok || node == nil {
return [32]byte{}, ErrNilNode
}
if slots.ToEpoch(en.node.slot) >= epoch {
if en.node.parent != nil {
en = en.node.parent
if slots.ToEpoch(node.slot) >= epoch {
if node.parent != nil {
node = node.parent
} else {
return f.store.finalizedDependentRoot, nil
}
}
return en.node.root, nil
return node.root, nil
}
// TargetRootForEpoch returns the root of the target block for a given epoch.
@@ -674,48 +661,46 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
// which case we return the root of the checkpoint of the chain containing the
// passed root, at the given epoch
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
node := n.node
nodeEpoch := slots.ToEpoch(node.slot)
nodeEpoch := slots.ToEpoch(n.slot)
if epoch > nodeEpoch {
return node.root, nil
return n.root, nil
}
if node.target == nil {
if n.target == nil {
return [32]byte{}, nil
}
targetRoot := node.target.root
targetRoot := n.target.root
if epoch == nodeEpoch {
return targetRoot, nil
}
targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
targetNode, ok := f.store.nodeByRoot[targetRoot]
if !ok || targetNode == nil {
return [32]byte{}, ErrNilNode
}
// If slot 0 was not missed we consider a previous block to go back at least one epoch
if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
targetNode = targetNode.node.parent
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
targetNode = targetNode.parent
if targetNode == nil {
return [32]byte{}, ErrNilNode
}
}
return f.TargetRootForEpoch(targetNode.node.root, epoch)
return f.TargetRootForEpoch(targetNode.root, epoch)
}
// ParentRoot returns the block root of the parent node if it is in forkchoice.
// The exception is the finalized checkpoint root, for which we return the zero
// hash.
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
// Return the zero hash for the tree root
parent := n.node.parent
if parent == nil {
if n.parent == nil {
return [32]byte{}, nil
}
return parent.node.root, nil
return n.parent.root, nil
}

View File

@@ -3,6 +3,7 @@ package doublylinkedtree
import (
"context"
"encoding/binary"
"errors"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice"
@@ -103,9 +104,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
s := f.store
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
@@ -121,9 +122,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{100, 100, 100}
f.votes = []Vote{
@@ -134,9 +135,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
@@ -152,9 +153,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
@@ -165,9 +166,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
@@ -223,12 +224,12 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode))
require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].node.weight)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].node.weight)
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, 1, 1, 1))
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
r1 := [32]byte{'1'}
@@ -259,7 +260,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)].node
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
@@ -341,21 +342,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Process b's slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
@@ -513,6 +514,58 @@ func TestStore_CommonAncestor(t *testing.T) {
})
}
// a -- b -- c -- d
f = setup(0, 0)
st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
tests = []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot primitives.Slot
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
wantSlot: 1,
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
// Equal inputs should return the same root.
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
@@ -535,9 +588,10 @@ func TestStore_CommonAncestor(t *testing.T) {
unrealizedJustifiedEpoch: 1,
finalizedEpoch: 1,
unrealizedFinalizedEpoch: 1,
optimistic: true,
}
f.store.emptyNodeByRoot[[32]byte{'y'}] = &PayloadNode{node: n, optimistic: true}
f.store.nodeByRoot[[32]byte{'y'}] = n
// broken link
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
@@ -556,8 +610,7 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
Block: roblock,
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -572,8 +625,7 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
Block: roblock,
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -690,7 +742,7 @@ func TestWeight(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
require.Equal(t, true, ok)
n.weight = 10
w, err := f.Weight(root)
@@ -862,3 +914,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, zeroHash, root)
}
func TestForkChoice_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
return f.justifiedBalances, errors.New("mock err")
})
require.NoError(t, err)
require.NotNil(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, false, f.HasNode(roblock.Root()))
}

View File

@@ -1,289 +0,0 @@
package doublylinkedtree
import (
"bytes"
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
sb, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
return err
}
wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
if err != nil {
return errors.Wrap(err, "failed to wrap signed bid")
}
bid, err := wb.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid from wrapped bid")
}
*blockHash = bid.BlockHash()
parentRoot := block.ParentRoot()
*parent = s.emptyNodeByRoot[parentRoot]
if *parent == nil {
// This is the tree root node.
return nil
}
if bid.ParentBlockHash() == (*parent).node.blockHash {
// block builds on full
*parent = s.fullNodeByRoot[(*parent).node.root]
}
return nil
}
// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
// Recursively calling the children to sum their weights.
en := s.emptyNodeByRoot[n.root]
if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
return err
}
childrenWeight := en.weight
fn := s.fullNodeByRoot[n.root]
if fn != nil {
if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
return err
}
childrenWeight += fn.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
return err
}
childrenWeight += child.weight
}
n.weight = n.balance + childrenWeight
return nil
}
// allConsensusChildren returns the list of all consensus blocks that build on the given node.
func (s *Store) allConsensusChildren(n *Node) []*Node {
en := s.emptyNodeByRoot[n.root]
fn, ok := s.fullNodeByRoot[n.root]
if ok {
return append(slices.Clone(en.children), fn.children...)
}
return en.children
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !pn.optimistic {
return nil
}
pn.optimistic = false
if pn.full {
// set the empty node as valid too
en := s.emptyNodeByRoot[pn.node.root]
en.optimistic = false
}
if pn.node.parent == nil {
return nil
}
return s.setNodeAndParentValidated(ctx, pn.node.parent)
}
// fullParent returns the latest full node that this block builds on.
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
parent := pn.node.parent
for ; parent != nil && !parent.full; parent = parent.node.parent {
}
return parent
}
// parentHash return the payload hash of the latest full node that this block builds on.
func (s *Store) parentHash(pn *PayloadNode) [32]byte {
fullParent := s.fullParent(pn)
if fullParent == nil {
return [32]byte{}
}
return fullParent.node.blockHash
}
// latestHashForRoot returns the latest payload hash for the given block root.
func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
// try to get the full node first
fn := s.fullNodeByRoot[root]
if fn != nil {
return fn.node.blockHash
}
en := s.emptyNodeByRoot[root]
if en == nil {
// This should not happen
return [32]byte{}
}
return s.parentHash(en)
}
// updateBestDescendantPayloadNode updates the best descendant of this node and its
// children.
func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
var bestChild *Node
bestWeight := uint64(0)
for _, child := range n.children {
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && bestChild == nil {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if bestChild == nil {
n.bestDescendant = nil
} else {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
}
return nil
}
// updateBestDescendantConsensusNode updates the best descendant of this node and its
// children.
func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(s.allConsensusChildren(n)) == 0 {
n.bestDescendant = nil
return nil
}
en := s.emptyNodeByRoot[n.root]
if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
fn := s.fullNodeByRoot[n.root]
if fn == nil {
n.bestDescendant = en.bestDescendant
return nil
}
// TODO GLOAS: pick between full or empty
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
n.bestDescendant = fn.bestDescendant
return nil
}
// choosePayloadContent chooses between empty or full for the passed consensus node. TODO Gloas: use PTC to choose.
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
if n == nil {
return nil
}
fn := s.fullNodeByRoot[n.root]
if fn != nil {
return fn
}
return s.emptyNodeByRoot[n.root]
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.node.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
optimistic := false
if n.parent != nil {
optimistic = n.parent.optimistic
}
en := s.emptyNodeByRoot[n.root]
timestamp := en.timestamp
fn := s.fullNodeByRoot[n.root]
if fn != nil {
optimistic = fn.optimistic
timestamp = fn.timestamp
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: optimistic,
ExecutionBlockHash: n.blockHash[:],
Timestamp: timestamp,
Target: target[:],
}
if optimistic {
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)
var err error
children := s.allConsensusChildren(n)
for _, child := range children {
nodes, err = s.nodeTreeDump(ctx, child, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}

View File

@@ -0,0 +1,26 @@
package doublylinkedtree
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LastRoot returns the last canonical block root in the given epoch
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
head := f.store.headNode
headEpoch := slots.ToEpoch(head.slot)
epochEnd, err := slots.EpochEnd(epoch)
if err != nil {
return [32]byte{}
}
if headEpoch <= epoch {
return head.root
}
for head != nil && head.slot > epochEnd {
head = head.parent
}
if head == nil {
return [32]byte{}
}
return head.root
}

View File

@@ -0,0 +1,38 @@
package doublylinkedtree
import (
"testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestLastRoot(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
headNode := f.store.nodeByRoot[[32]byte{'6'}]
f.store.headNode = headNode
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
}

View File

@@ -1,17 +1,95 @@
package doublylinkedtree
import (
"bytes"
"context"
"time"
"github.com/OffchainLabs/prysm/v7/config/params"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessAttestationsThreshold is the amount of time after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10 * time.Second
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
return err
}
childrenWeight += child.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// updateBestDescendant updates the best descendant of this node and its
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
return nil
}
var bestChild *Node
bestWeight := uint64(0)
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
hasViableDescendant = true
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if hasViableDescendant {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
} else {
n.bestDescendant = nil
}
return nil
}
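// bestOf reduces the selection rule in the loop above to the comparison itself:
// among children that already lead to a viable head, keep the highest weight and
// break exact ties toward the lexicographically larger root. A hypothetical helper
// for illustration; it assumes a non-empty, pre-filtered slice of viable children.
func bestOf(viableChildren []*Node) *Node {
	best := viableChildren[0]
	for _, c := range viableChildren[1:] {
		if c.weight > best.weight ||
			(c.weight == best.weight && bytes.Compare(c.root[:], best.root[:]) > 0) {
			best = c
		}
	}
	// best (or its bestDescendant, when set) would become the parent's bestDescendant.
	return best
}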
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
@@ -32,13 +110,30 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !n.optimistic {
return nil
}
n.optimistic = false
if n.parent == nil {
return nil
}
return n.parent.setNodeAndParentValidated(ctx)
}
// arrivedEarly returns whether this node was inserted before the first
// threshold to orphan a block.
// Note that genesisTime has seconds granularity, therefore we use a strict
// inequality < here. For example a block that arrives 3.9999 seconds into the
// slot will have secs = 3 below.
func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
return sss < votingWindow, err
}
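// arrivalExample is a worked example (invented times) of the truncation described
// above: a block arriving 3.9999s into a slot counts as 3s after truncating to
// whole seconds. The 4s window here is a hypothetical stand-in for the configured
// attestation-due threshold.
func arrivalExample() bool {
	slotStart := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)
	arrival := slotStart.Add(3*time.Second + 999900*time.Microsecond) // 3.9999s into the slot
	intoSlot := arrival.Truncate(time.Second).Sub(slotStart)          // truncates to 3s
	return intoSlot < 4*time.Second                                   // true: counted as early
}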
@@ -48,7 +143,52 @@ func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
// Note that genesisTime has seconds granularity, therefore we use an
// inequality >= here. For example, a block that arrives 10.00001 seconds into the
// slot will have secs = 10 below.
func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
return secs >= ProcessAttestationsThreshold, err
}
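The seconds-granularity reasoning in the two comments above can be checked with a small standalone example. The 4s voting window and 10s orphan-check threshold below are assumed illustration values; the real ones come from the beacon config.

package main

import (
	"fmt"
	"time"
)

func main() {
	genesis := time.Unix(1_600_000_000, 0)
	votingWindow := 4 * time.Second          // assumed attestation-due window
	orphanCheckThreshold := 10 * time.Second // assumed orphan-check threshold

	// 3.9999s into the slot truncates to 3s, so the strict < still counts it as early.
	arrival := genesis.Add(3999900 * time.Microsecond)
	since := arrival.Truncate(time.Second).Sub(genesis)
	fmt.Println(since < votingWindow) // true

	// 10.00001s truncates to 10s, so >= counts it as past the orphan check.
	arrival = genesis.Add(10*time.Second + 10*time.Microsecond)
	since = arrival.Truncate(time.Second).Sub(genesis)
	fmt.Println(since >= orphanCheckThreshold) // true
}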
// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: n.optimistic,
ExecutionBlockHash: n.payloadHash[:],
Timestamp: n.timestamp,
Target: target[:],
}
if n.optimistic {
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)
var err error
for _, child := range n.children {
nodes, err = child.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}
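Stripped of the response fields, the traversal above is a plain pre-order dump: append the node itself, then recurse into each child. A hypothetical minimal version:

// dumpNode is a hypothetical minimal node; treeDump reproduces the pre-order
// traversal above, producing one flat slice for the whole subtree.
type dumpNode struct {
	root     [32]byte
	children []*dumpNode
}

func treeDump(n *dumpNode, out [][32]byte) [][32]byte {
	out = append(out, n.root)
	for _, c := range n.children {
		out = treeDump(c, out)
	}
	return out
}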

View File

@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
// The updated balance of each node is 100
s := f.store
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
// The updated balance of each node is 100
s := f.store
s.emptyNodeByRoot[indexToHash(1)].weight = 400
s.emptyNodeByRoot[indexToHash(2)].weight = 400
s.emptyNodeByRoot[indexToHash(3)].weight = 400
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
@@ -78,7 +78,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
// Verify parent's best child and best descendant are `none`.
s := f.store
assert.Equal(t, 1, len(s.allConsensusChildren(s.treeRootNode)))
assert.Equal(t, 1, len(s.treeRootNode.children))
nilBestDescendant := s.treeRootNode.bestDescendant == nil
assert.Equal(t, true, nilBestDescendant)
}
@@ -92,9 +92,8 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
children := s.allConsensusChildren(s.treeRootNode)
assert.Equal(t, 1, len(children))
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
assert.Equal(t, 1, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
@@ -109,34 +108,32 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].weight = 100
s.emptyNodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
children := s.allConsensusChildren(s.treeRootNode)
assert.Equal(t, 2, len(children))
assert.Equal(t, children[1], s.treeRootNode.bestDescendant)
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
// Input child is the best descendant
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, indexToHash(102), 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].node.weight = 200
s.emptyNodeByRoot[indexToHash(2)].node.weight = 100
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
children := s.allConsensusChildren(s.treeRootNode)
assert.Equal(t, 2, len(children))
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_ViableForHead(t *testing.T) {
@@ -179,44 +176,44 @@ func TestNode_LeadsToViableHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].node.leadsToViableHead(4, 5))
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].node.leadsToViableHead(4, 5))
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].node.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
storeNodes := make([]*PayloadNode, 6)
storeNodes[0] = f.store.fullNodeByRoot[params.BeaconConfig().ZeroHash]
storeNodes := make([]*Node, 6)
storeNodes[0] = f.store.treeRootNode
// insert blocks in the fork pattern (optimistic status in parenthesis)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
// \
// -- 5 (true)
//
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[1] = f.store.fullNodeByRoot[blk.Root()]
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), indexToHash(102), 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.fullNodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(2)))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), indexToHash(103), 1, 1)
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[3] = f.store.fullNodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), indexToHash(104), 1, 1)
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[4] = f.store.fullNodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), indexToHash(105), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[5] = f.store.fullNodeByRoot[blk.Root()]
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -226,7 +223,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.setNodeAndParentValidated(ctx, f.store.fullNodeByRoot[indexToHash(4)]))
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))
@@ -243,20 +240,20 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.Equal(t, false, opt)
respNodes := make([]*forkchoice.Node, 0)
respNodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, respNodes)
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())
for i, respNode := range respNodes {
require.Equal(t, storeNodes[i].node.slot, respNode.Slot)
require.DeepEqual(t, storeNodes[i].node.root[:], respNode.BlockRoot)
require.Equal(t, storeNodes[i].node.balance, respNode.Balance)
require.Equal(t, storeNodes[i].node.weight, respNode.Weight)
require.Equal(t, storeNodes[i].slot, respNode.Slot)
require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
require.Equal(t, storeNodes[i].balance, respNode.Balance)
require.Equal(t, storeNodes[i].weight, respNode.Weight)
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
require.Equal(t, storeNodes[i].node.justifiedEpoch, respNode.JustifiedEpoch)
require.Equal(t, storeNodes[i].node.unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
require.Equal(t, storeNodes[i].node.finalizedEpoch, respNode.FinalizedEpoch)
require.Equal(t, storeNodes[i].node.unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
}
}
@@ -275,10 +272,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err := f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, true, early)
late, err := f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, late)
@@ -292,10 +289,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, early)
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, late)
@@ -308,10 +305,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, early)
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, true, late)
@@ -323,10 +320,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, true, early)
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, false, late)
}

View File

@@ -7,141 +7,93 @@ import (
"github.com/pkg/errors"
)
// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
n := s.fullNodeByRoot[root]
if n == nil {
// The offending node with its payload is not in forkchoice. Try with the parent
n = s.emptyNodeByRoot[parentRoot]
if n == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
node, ok := s.nodeByRoot[root]
if !ok {
node, ok = s.nodeByRoot[parentRoot]
if !ok || node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if n.node.blockHash == lastValidHash {
// The parent node must have been full and with a valid payload
// return early if the parent is LVH
if node.payloadHash == lastValidHash {
return invalidRoots, nil
}
if n.node.blockHash == parentHash {
// The parent was full and invalid
n = s.fullNodeByRoot[parentRoot]
if n == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
}
} else {
// The parent is empty and we don't yet know if it's valid or not
for n = n.node.parent; n != nil; n = n.node.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
if n.node.blockHash == lastValidHash {
// The node built on empty and the whole chain was valid
return invalidRoots, nil
}
if n.node.blockHash == parentHash {
// The parent was full and invalid
break
}
}
if n == nil {
return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
}
}
} else {
// check consistency with the parent information
if n.node.parent == nil {
return nil, ErrNilNode
if node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if n.node.parent.node.root != parentRoot {
return nil, errInvalidParentRoot
if node.parent.root != parentRoot {
return invalidRoots, errInvalidParentRoot
}
}
// n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
startNode := n
fp := s.fullParent(n)
for ; fp != nil && fp.node.blockHash != lastValidHash; fp = s.fullParent(fp) {
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
n = fp
}
// Deal with the case that the last valid payload is in a different fork
// This means we are dealing with an EE that does not follow the spec
if fp == nil {
if firstInvalid.parent == nil {
// return early if the invalid node was not imported
if startNode.node.root != root {
if node.root == parentRoot {
return invalidRoots, nil
}
// Remove just the imported invalid root
n = startNode
firstInvalid = node
}
return s.removeNode(ctx, n)
return s.removeNode(ctx, firstInvalid)
}
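The core of the function above is the ancestor walk toward the engine-reported last valid hash. A hypothetical fragment (invNode is not the real type) showing just that step:

// invNode is a hypothetical stand-in: starting from the offending node, climb
// parents until the parent's payload hash equals the last valid hash; the node
// reached at that point roots the invalid subtree to be removed.
type invNode struct {
	payloadHash [32]byte
	parent      *invNode
}

func firstInvalid(n *invNode, lastValidHash [32]byte) *invNode {
	cur := n
	for cur.parent != nil && cur.parent.payloadHash != lastValidHash {
		cur = cur.parent
	}
	return cur
}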
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
if pn == nil {
if node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
}
if !pn.optimistic || pn.node.parent == nil {
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := pn.node.parent.children
children := node.parent.children
if len(children) == 1 {
pn.node.parent.children = []*Node{}
node.parent.children = []*Node{}
} else {
for i, n := range children {
if n == pn.node {
if n == node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
pn.node.parent.children = children[:len(children)-1]
node.parent.children = children[:len(children)-1]
break
}
}
}
return s.removeNodeAndChildren(ctx, pn, invalidRoots)
return s.removeNodeAndChildren(ctx, node, invalidRoots)
}
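The children bookkeeping above uses the standard swap-with-last trick. A standalone, generic sketch of the same deletion:

// removeChild overwrites the matching element with the last element and
// shortens the slice by one: child order is not preserved, but removal is O(1).
func removeChild[T comparable](children []T, target T) []T {
	for i, c := range children {
		if c == target {
			children[i] = children[len(children)-1]
			return children[:len(children)-1]
		}
	}
	return children
}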
// removeNodeAndChildren removes `node` and all of its descendant from the Store
func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
var err error
// If we are removing an empty node, then remove the full node as well if it exists.
if !pn.full {
fn, ok := s.fullNodeByRoot[pn.node.root]
if ok {
invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
if err != nil {
return invalidRoots, err
}
}
}
// Now we remove the full node's children.
for _, child := range pn.children {
for _, child := range node.children {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
// We need to remove only the empty node here since the recursion will take care of the full one.
en := s.emptyNodeByRoot[child.root]
if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
return invalidRoots, err
}
}
// Only append the root for the empty nodes.
if pn.full {
delete(s.fullNodeByRoot, pn.node.root)
} else {
invalidRoots = append(invalidRoots, pn.node.root)
if pn.node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if pn.node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.emptyNodeByRoot, pn.node.root)
invalidRoots = append(invalidRoots, node.root)
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}

View File

@@ -23,35 +23,93 @@ import (
// And every block in the Fork choice is optimistic.
func TestPruneInvalid(t *testing.T) {
tests := []struct {
name string
root [32]byte // the root of the new INVALID block
parentRoot [32]byte // the root of the parent block
parentHash [32]byte // the execution hash of the parent block
lastValidHash [32]byte // the last valid execution hash
payload [32]byte // the last valid hash
wantedNodeNumber int
wantedRoots [][32]byte
wantedErr error
}{
{ // Bogus LVH, root not in forkchoice
name: "bogus LVH not in forkchoice",
root: [32]byte{'x'}, parentRoot: [32]byte{'i'}, parentHash: [32]byte{'I'}, lastValidHash: [32]byte{'R'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{ // Bogus LVH
name: "bogus LVH",
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'R'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
[32]byte{'x'},
[32]byte{'i'},
[32]byte{'R'},
13,
[][32]byte{},
nil,
},
{
name: "wanted j",
root: [32]byte{'j'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
// Bogus LVH
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'R'},
12,
[][32]byte{{'i'}},
nil,
},
{
name: "wanted 5",
root: [32]byte{'c'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
wantedNodeNumber: 5,
wantedRoots: [][32]byte{
[32]byte{'j'},
[32]byte{'b'},
[32]byte{'B'},
12,
[][32]byte{{'j'}},
nil,
},
{
[32]byte{'c'},
[32]byte{'b'},
[32]byte{'B'},
4,
[][32]byte{{'f'}, {'e'}, {'i'}, {'h'}, {'l'},
{'k'}, {'g'}, {'d'}, {'c'}},
nil,
},
{
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'H'},
12,
[][32]byte{{'i'}},
nil,
},
{
[32]byte{'h'},
[32]byte{'g'},
[32]byte{'G'},
11,
[][32]byte{{'i'}, {'h'}},
nil,
},
{
[32]byte{'g'},
[32]byte{'d'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
[32]byte{'f'},
[32]byte{'e'},
[32]byte{'D'},
11,
[][32]byte{{'f'}, {'e'}},
nil,
},
{
[32]byte{'h'},
[32]byte{'g'},
[32]byte{'C'},
5,
[][32]byte{
{'f'},
{'e'},
{'i'},
@@ -61,118 +119,106 @@ func TestPruneInvalid(t *testing.T) {
{'g'},
{'d'},
},
nil,
},
{
name: "wanted i",
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'H'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
[32]byte{'g'},
[32]byte{'d'},
[32]byte{'E'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
name: "wanted i and h",
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'G'},
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'i'}},
[32]byte{'z'},
[32]byte{'j'},
[32]byte{'B'},
12,
[][32]byte{{'j'}},
nil,
},
{
name: "wanted i--g",
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
[32]byte{'z'},
[32]byte{'j'},
[32]byte{'J'},
13,
[][32]byte{},
nil,
},
{
name: "wanted 9",
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
[32]byte{'j'},
[32]byte{'a'},
[32]byte{'B'},
0,
[][32]byte{},
errInvalidParentRoot,
},
{
name: "wanted f and e",
root: [32]byte{'f'}, parentRoot: [32]byte{'e'}, parentHash: [32]byte{'E'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'f'}},
[32]byte{'z'},
[32]byte{'h'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
name: "wanted 6",
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'C'},
wantedNodeNumber: 6,
wantedRoots: [][32]byte{
{'f'}, {'e'}, {'i'}, {'h'}, {'l'}, {'k'}, {'g'},
},
},
{
name: "wanted 9 again",
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'E'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
},
{
name: "wanted 13",
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'B'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
name: "wanted empty",
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'J'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
name: "errInvalidParentRoot",
root: [32]byte{'j'}, parentRoot: [32]byte{'a'}, parentHash: [32]byte{'A'}, lastValidHash: [32]byte{'B'},
wantedErr: errInvalidParentRoot,
},
{
name: "root z",
root: [32]byte{'z'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
[32]byte{'z'},
[32]byte{'h'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{}))
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
ctx := t.Context()
f := setup(1, 1)
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.parentHash, tc.lastValidHash)
if tc.wantedErr == nil {
require.NoError(t, err)
require.Equal(t, len(tc.wantedRoots), len(roots))
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
} else {
require.ErrorIs(t, tc.wantedErr, err)
}
})
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.payload)
if tc.wantedErr == nil {
require.NoError(t, err)
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
} else {
require.ErrorIs(t, tc.wantedErr, err)
}
}
}
@@ -194,40 +240,11 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'B'}, [32]byte{'A'})
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
require.NoError(t, err)
// proposer boost is still applied to c
require.Equal(t, uint64(10), f.store.previousProposerBoostScore)
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
require.Equal(t, [32]byte{'b'}, f.store.previousProposerBoostRoot)
}
func TestSetOptimisticToInvalid_ProposerBoost_Older(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.store.proposerBoostRoot = [32]byte{'d'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'c'}
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'C'}, [32]byte{'A'})
require.NoError(t, err)
// proposer boost is still applied to c
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
require.Equal(t, [32]byte{}, f.store.previousProposerBoostRoot)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
}
// This is a regression test (10565)
@@ -255,9 +272,10 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{'A'})
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.fullNodeByRoot[[32]byte{'a'}].children))
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
}
// Pow | Pos
@@ -304,13 +322,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
require.NoError(t, err)
require.Equal(t, 3, len(roots))
require.Equal(t, 4, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}
// Pow | Pos
@@ -357,13 +375,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
require.NoError(t, err)
require.Equal(t, 3, len(roots))
require.Equal(t, 4, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}
func TestSetOptimisticToValid(t *testing.T) {

View File

@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
s := f.store
proposerScore := uint64(0)
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
} else {
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
} else {

View File

@@ -166,14 +166,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// (1: 48) -> (2: 38) -> (3: 10)
// \--------------->(4: 18)
//
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
require.Equal(t, node1.node.weight, uint64(48))
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
require.Equal(t, node2.node.weight, uint64(38))
node3 := f.store.emptyNodeByRoot[indexToHash(3)]
require.Equal(t, node3.node.weight, uint64(10))
node4 := f.store.emptyNodeByRoot[indexToHash(4)]
require.Equal(t, node4.node.weight, uint64(18))
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(48))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(38))
node3 := f.store.nodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(18))
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight

View File

@@ -34,23 +34,22 @@ const orphanLateBlockProposingEarly = 2
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
override = false
// We only need to override FCU if our current consensusHead is from the current
// We only need to override FCU if our current head is from the current
// slot. This differs from the spec implementation in that we assume
// that we will call this function in the slot prior to proposing.
consensusHead := f.store.headNode
if consensusHead == nil {
head := f.store.headNode
if head == nil {
return
}
if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
if head.slot != slots.CurrentSlot(f.store.genesisTime) {
return
}
// Do not reorg on epoch boundaries
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return
}
head := f.store.choosePayloadContent(consensusHead)
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
@@ -62,15 +61,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return
}
// Only orphan a single block
parent := consensusHead.parent
parent := head.parent
if parent == nil {
return
}
if consensusHead.slot > parent.node.slot+1 {
if head.slot > parent.slot+1 {
return
}
// Do not orphan a block that has higher justification than the parent
@@ -79,12 +78,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// }
// Only orphan a block if the head LMD vote is weak
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return
}
// Return early if we are checking before 10 seconds into the slot
sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check current slot")
return true
@@ -93,7 +92,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
return true
}
// Only orphan a block if the parent LMD vote is strong
if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
}
return true
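The "weak head / strong parent" checks above are percentage comparisons done in integer arithmetic by scaling the weight by 100 rather than dividing the committee weight. A worked example with assumed numbers (not the real config thresholds):

package main

import "fmt"

func main() {
	// Assumed example values, not mainnet constants.
	committeeWeight := uint64(1_000_000)
	reorgHeadWeightThreshold := uint64(20)    // percent of committee weight
	reorgParentWeightThreshold := uint64(160) // percent of committee weight

	headWeight := uint64(150_000)     // 15% of a committee: weak
	parentWeight := uint64(1_700_000) // 170% of a committee: strong

	headIsWeak := headWeight*100 <= committeeWeight*reorgHeadWeightThreshold
	parentIsStrong := parentWeight*100 >= committeeWeight*reorgParentWeightThreshold
	fmt.Println(headIsWeak, parentIsStrong) // true true
}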
@@ -107,61 +106,60 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
consensusHead := f.store.headNode
if consensusHead == nil {
head := f.store.headNode
if head == nil {
return [32]byte{}
}
// Only reorg blocks from the previous slot.
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if consensusHead.slot+1 != currentSlot {
return consensusHead.root
if head.slot+1 != currentSlot {
return head.root
}
// Do not reorg on epoch boundaries
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return consensusHead.root
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return head.root
}
// Only reorg blocks that arrive late
head := f.store.choosePayloadContent(consensusHead)
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("could not check if block arrived early")
return consensusHead.root
return head.root
}
if early {
return consensusHead.root
return head.root
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return consensusHead.root
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return head.root
}
// Only orphan a single block
parent := consensusHead.parent
parent := head.parent
if parent == nil {
return consensusHead.root
return head.root
}
if consensusHead.slot > parent.node.slot+1 {
return consensusHead.root
if head.slot > parent.slot+1 {
return head.root
}
// Only orphan a block if the head LMD vote is weak
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return consensusHead.root
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return head.root
}
// Only orphan a block if the parent LMD vote is strong
if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return consensusHead.root
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return head.root
}
// Only reorg if we are proposing early
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check if proposing early")
return consensusHead.root
return head.root
}
if sss >= orphanLateBlockProposingEarly*time.Second {
return consensusHead.root
return head.root
}
return parent.node.root
return parent.root
}

View File

@@ -38,6 +38,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, blk.Root(), headRoot)
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, true, f.ShouldOverrideFCU())
})
t.Run("head is nil", func(t *testing.T) {
saved := f.store.headNode
@@ -59,11 +60,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.headNode.slot = saved
})
t.Run("head is early", func(t *testing.T) {
fn := f.store.fullNodeByRoot[f.store.headNode.root]
saved := fn.timestamp
fn.timestamp = saved.Add(-2 * time.Second)
saved := f.store.headNode.timestamp
f.store.headNode.timestamp = saved.Add(-2 * time.Second)
require.Equal(t, false, f.ShouldOverrideFCU())
fn.timestamp = saved
f.store.headNode.timestamp = saved
})
t.Run("chain not finalizing", func(t *testing.T) {
saved := f.store.headNode.slot
@@ -74,10 +74,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
})
t.Run("Not single block reorg", func(t *testing.T) {
saved := f.store.headNode.parent.node.slot
f.store.headNode.parent.node.slot = 0
saved := f.store.headNode.parent.slot
f.store.headNode.parent.slot = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.node.slot = saved
f.store.headNode.parent.slot = saved
})
t.Run("parent is nil", func(t *testing.T) {
saved := f.store.headNode.parent
@@ -86,17 +86,17 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.headNode.parent = saved
})
t.Run("parent is weak early call", func(t *testing.T) {
saved := f.store.headNode.parent.node.weight
f.store.headNode.parent.node.weight = 0
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, true, f.ShouldOverrideFCU())
f.store.headNode.parent.node.weight = saved
f.store.headNode.parent.weight = saved
})
t.Run("parent is weak late call", func(t *testing.T) {
saved := f.store.headNode.parent.node.weight
saved := f.store.headNode.parent.weight
driftGenesisTime(f, 2, 11*time.Second)
f.store.headNode.parent.node.weight = 0
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.node.weight = saved
f.store.headNode.parent.weight = saved
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
})
t.Run("Head is strong", func(t *testing.T) {
@@ -135,8 +135,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
fn := f.store.fullNodeByRoot[f.store.headNode.root]
fn.timestamp = fn.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
f.store.headNode.timestamp = f.store.headNode.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, parentRoot, f.GetProposerHead())
})
@@ -160,12 +159,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
f.store.headNode.slot = saved
})
t.Run("head is early", func(t *testing.T) {
fn := f.store.fullNodeByRoot[f.store.headNode.root]
saved := fn.timestamp
saved := f.store.headNode.timestamp
headTimeStamp := f.store.genesisTime.Add(time.Duration(uint64(f.store.headNode.slot)*params.BeaconConfig().SecondsPerSlot+1) * time.Second)
fn.timestamp = headTimeStamp
f.store.headNode.timestamp = headTimeStamp
require.Equal(t, childRoot, f.GetProposerHead())
fn.timestamp = saved
f.store.headNode.timestamp = saved
})
t.Run("chain not finalizing", func(t *testing.T) {
saved := f.store.headNode.slot
@@ -176,10 +174,10 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
driftGenesisTime(f, 3, 1*time.Second)
})
t.Run("Not single block reorg", func(t *testing.T) {
saved := f.store.headNode.parent.node.slot
f.store.headNode.parent.node.slot = 0
saved := f.store.headNode.parent.slot
f.store.headNode.parent.slot = 0
require.Equal(t, childRoot, f.GetProposerHead())
f.store.headNode.parent.node.slot = saved
f.store.headNode.parent.slot = saved
})
t.Run("parent is nil", func(t *testing.T) {
saved := f.store.headNode.parent

View File

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// head starts from justified root and then follows the best descendant links
@@ -27,16 +26,13 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}
// JustifiedRoot has to be known
var jn *Node
ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
if ej != nil {
jn = ej.node
} else {
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
jn = s.treeRootNode
justifiedNode = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
@@ -44,9 +40,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// If the justified node doesn't have a best descendant,
// the best node is itself.
bestDescendant := jn.bestDescendant
bestDescendant := justifiedNode.bestDescendant
if bestDescendant == nil {
bestDescendant = jn
bestDescendant = justifiedNode
}
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
@@ -70,42 +66,29 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
roblock consensus_blocks.ROBlock,
justifiedEpoch, finalizedEpoch primitives.Epoch,
) (*PayloadNode, error) {
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
root := roblock.Root()
block := roblock.Block()
slot := block.Slot()
parentRoot := block.ParentRoot()
var payloadHash [32]byte
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}
// Return if the block has been inserted into Store before.
if n, ok := s.emptyNodeByRoot[root]; ok {
if n, ok := s.nodeByRoot[root]; ok {
return n, nil
}
block := roblock.Block()
slot := block.Slot()
var parent *PayloadNode
blockHash := &[32]byte{}
if block.Version() >= version.Gloas {
if err := s.resolveParentPayloadStatus(block, &parent, blockHash); err != nil {
return nil, err
}
} else {
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(blockHash[:], execution.BlockHash())
}
parentRoot := block.ParentRoot()
en := s.emptyNodeByRoot[parentRoot]
parent = s.fullNodeByRoot[parentRoot]
if parent == nil && en != nil {
// pre-Gloas only full parents are allowed.
return nil, errInvalidParentRoot
}
}
parent := s.nodeByRoot[parentRoot]
n := &Node{
slot: slot,
root: root,
@@ -114,51 +97,32 @@ func (s *Store) insert(ctx context.Context,
unrealizedJustifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
unrealizedFinalizedEpoch: finalizedEpoch,
blockHash: *blockHash,
optimistic: true,
payloadHash: payloadHash,
timestamp: time.Now(),
}
// Set the node's target checkpoint
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
n.target = n
} else if parent != nil {
if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
n.target = parent.node.target
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
n.target = parent.target
} else {
n.target = parent.node
n.target = parent
}
}
var ret *PayloadNode
optimistic := true
if parent != nil {
optimistic = n.parent.optimistic
}
// Make the empty node. Its optimistic status equals its parent's status.
pn := &PayloadNode{
node: n,
optimistic: optimistic,
timestamp: time.Now(),
}
s.emptyNodeByRoot[root] = pn
ret = pn
if block.Version() < version.Gloas {
// Also make the full node; it is optimistic until the engine returns the execution payload validation.
fn := &PayloadNode{
node: n,
optimistic: true,
timestamp: time.Now(),
full: true,
}
ret = fn
s.fullNodeByRoot[root] = fn
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
s.highestReceivedNode = n
} else {
delete(s.emptyNodeByRoot, root)
delete(s.fullNodeByRoot, root)
delete(s.nodeByRoot, root)
delete(s.nodeByPayload, payloadHash)
return nil, errInvalidParentRoot
}
} else {
@@ -166,7 +130,7 @@ func (s *Store) insert(ctx context.Context,
// Apply proposer boost
now := time.Now()
if now.Before(s.genesisTime) {
return ret, nil
return n, nil
}
currentSlot := slots.CurrentSlot(s.genesisTime)
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
@@ -182,16 +146,17 @@ func (s *Store) insert(ctx context.Context,
// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
log.WithError(err).WithFields(logrus.Fields{
"slot": slot,
"root": root,
}).Error("Could not update best descendant")
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
_, remErr := s.removeNode(ctx, n)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")
}
return nil, errors.Wrap(err, "could not update best descendants")
}
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
nodeCount.Set(float64(len(s.nodeByRoot)))
// Only update the received block slot if it is within an epoch of the current time.
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
@@ -202,10 +167,10 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
}
return ret, nil
return n, nil
}
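The target-checkpoint assignment in insert() above follows a simple rule: an epoch-boundary block is its own target; otherwise a block in the same epoch as its parent inherits the parent's target, while a block starting a new epoch uses the parent itself. A hypothetical fragment restating it with slotsPerEpoch passed in rather than read from config:

// tNode is a hypothetical stand-in, not the fork choice Node.
type tNode struct {
	slot   uint64
	target *tNode
	parent *tNode
}

func setTarget(n *tNode, slotsPerEpoch uint64) {
	switch {
	case n.slot%slotsPerEpoch == 0:
		n.target = n // epoch-boundary block is its own target
	case n.parent == nil:
		// no known parent: leave the target unset
	case n.slot/slotsPerEpoch == n.parent.slot/slotsPerEpoch:
		n.target = n.parent.target // same epoch: inherit the parent's target
	default:
		n.target = n.parent // new epoch: the parent is the target
	}
}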
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` maps
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
@@ -218,51 +183,45 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
}
return nil
}
for _, child := range s.allConsensusChildren(node) {
for _, child := range node.children {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return err
}
}
en := s.emptyNodeByRoot[node.root]
en.children = nil
delete(s.emptyNodeByRoot, node.root)
fn := s.fullNodeByRoot[node.root]
if fn != nil {
fn.children = nil
delete(s.fullNodeByRoot, node.root)
}
node.children = nil
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return nil
}
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
// TODO: GLOAS, to ensure that chains up to a full node are found, we may want to consider pruning only up to the latest full block that was finalized
func (s *Store) prune(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
finalizedRoot := s.finalizedCheckpoint.Root
finalizedEpoch := s.finalizedCheckpoint.Epoch
fen, ok := s.emptyNodeByRoot[finalizedRoot]
if !ok || fen == nil {
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
}
fn := fen.node
// return early if we haven't changed the finalized checkpoint
if fn.parent == nil {
if finalizedNode.parent == nil {
return nil
}
// Save the new finalized dependent root because it will be pruned
s.finalizedDependentRoot = fn.parent.node.root
s.finalizedDependentRoot = finalizedNode.parent.root
// Prune nodeByRoot starting from root
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
return err
}
fn.parent = nil
s.treeRootNode = fn
finalizedNode.parent = nil
s.treeRootNode = finalizedNode
prunedCount.Inc()
// Prune all children of the finalized checkpoint block that are incompatible with it
@@ -270,13 +229,13 @@ func (s *Store) prune(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not compute epoch start")
}
if fn.slot == checkpointMaxSlot {
if finalizedNode.slot == checkpointMaxSlot {
return nil
}
for _, child := range fen.children {
for _, child := range finalizedNode.children {
if child != nil && child.slot <= checkpointMaxSlot {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return errors.Wrap(err, "could not prune incompatible finalized child")
}
}
@@ -290,10 +249,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
var roots [][32]byte
var slots []primitives.Slot
for root, n := range s.emptyNodeByRoot {
if len(s.allConsensusChildren(n.node)) == 0 {
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
roots = append(roots, root)
slots = append(slots, n.node.slot)
slots = append(slots, node.slot)
}
}
return roots, slots
@@ -314,6 +273,21 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return f.store.highestReceivedNode.slot
}
// HighestReceivedBlockDelay returns the number of slots by which the highest
// received block was late when it was received. For example, if a block was
// 12 slots late, this method returns 12.
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
n := f.store.highestReceivedNode
if n == nil {
return 0
}
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
if err != nil {
return 0
}
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
}
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
count := uint64(0)

View File

@@ -1,6 +1,7 @@
package doublylinkedtree
import (
"context"
"testing"
"time"
@@ -40,18 +41,18 @@ func TestStore_NodeByRoot(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
node0 := f.store.emptyNodeByRoot[params.BeaconConfig().ZeroHash]
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
node0 := f.store.treeRootNode
node1 := node0.children[0]
node2 := node1.children[0]
expectedRoots := map[[32]byte]*PayloadNode{
expectedRoots := map[[32]byte]*Node{
params.BeaconConfig().ZeroHash: node0,
indexToHash(1): node1,
indexToHash(2): node2,
}
require.Equal(t, 3, f.NodeCount())
for root, node := range f.store.emptyNodeByRoot {
for root, node := range f.store.nodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
@@ -110,28 +111,38 @@ func TestStore_Head_BestDescendant(t *testing.T) {
require.Equal(t, h, indexToHash(4))
}
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
f := setup(0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
cancel()
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
err = f.InsertNode(ctx, state, blkRoot)
require.ErrorContains(t, "context canceled", err)
}
func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
emptyRootPN := &PayloadNode{node: treeRootNode}
fullRootPN := &PayloadNode{node: treeRootNode, full: true, optimistic: true}
emptyNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): emptyRootPN}
fullNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): fullRootPN}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{emptyNodeByRoot: emptyNodeByRoot, fullNodeByRoot: fullNodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
assert.Equal(t, (*PayloadNode)(nil), treeRootNode.parent, "Incorrect parent")
children := s.allConsensusChildren(treeRootNode)
assert.Equal(t, 1, len(children), "Incorrect children number")
assert.Equal(t, payloadHash, children[0].blockHash, "Incorrect payload hash")
child := children[0]
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
child := treeRootNode.children[0]
assert.Equal(t, primitives.Epoch(1), child.justifiedEpoch, "Incorrect justification")
assert.Equal(t, primitives.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
@@ -156,7 +167,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Finalized root is at index 99 so everything before 99 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
@@ -178,12 +189,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Finalized root is at index 11 so everything before 11 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(10)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
// One more time.
s.finalizedCheckpoint.Root = indexToHash(20)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_ReturnEarly(t *testing.T) {
@@ -226,7 +237,8 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.emptyNodeByRoot), 1)
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
// This test starts with the following branching diagram
@@ -306,7 +318,9 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.emptyNodeByRoot), 1)
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
@@ -325,6 +339,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64
// Received block last epoch is 1
@@ -337,6 +352,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64 65
// Received block last epoch is 2
@@ -349,6 +365,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(2), count)
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())
// 64 65 66
// Received block last epoch is 3
@@ -700,3 +717,17 @@ func TestStore_CleanupInserting(t *testing.T) {
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
}
func TestStore_HighestReceivedBlockDelay(t *testing.T) {
f := ForkChoice{
store: &Store{
genesisTime: time.Unix(0, 0),
highestReceivedNode: &Node{
slot: 10,
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
},
},
}
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
}

View File

@@ -21,26 +21,24 @@ type ForkChoice struct {
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}
var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by roots.
fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // full nodes (the payload was present) indexed by beacon block root.
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time
highestReceivedNode *Node // The highest slot node.
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
@@ -52,28 +50,19 @@ type Store struct {
type Node struct {
slot primitives.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
blockHash [fieldparams.RootLength]byte // execution block hash of the block converted to the node.
parent *PayloadNode // parent index of this node.
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
parent *Node // parent index of this node.
target *Node // target checkpoint for this node.
bestDescendant *Node // bestDescendant node of this node.
children []*Node // the list of direct children of this Node
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
}
// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty or full.
type PayloadNode struct {
optimistic bool // whether the block has been fully validated or not
full bool // whether this node represents a payload present or not
weight uint64 // weight of this node: the total balance including children
balance uint64 // the balance that voted for this node directly
bestDescendant *Node // bestDescendant node of this payload node.
node *Node // the consensus part of this full forkchoice node
timestamp time.Time // The timestamp when the node was inserted.
children []*Node // the list of direct children of this Node
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp time.Time // The timestamp when the node was inserted.
}
// Vote defines an individual validator's vote.

View File

@@ -15,34 +15,33 @@ import (
)
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
}
if epoch < en.node.unrealizedJustifiedEpoch {
if epoch < node.unrealizedJustifiedEpoch {
return errInvalidUnrealizedJustifiedEpoch
}
en.node.unrealizedJustifiedEpoch = epoch
node.unrealizedJustifiedEpoch = epoch
return nil
}
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
}
if epoch < en.node.unrealizedFinalizedEpoch {
if epoch < node.unrealizedFinalizedEpoch {
return errInvalidUnrealizedFinalizedEpoch
}
en.node.unrealizedFinalizedEpoch = epoch
node.unrealizedFinalizedEpoch = epoch
return nil
}
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
for _, en := range f.store.emptyNodeByRoot {
node := en.node
for _, node := range f.store.nodeByRoot {
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
@@ -63,17 +62,16 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
if node.parent == nil { // Nothing to do if the parent is nil.
return jc, fc
}
pn := node.parent.node
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
stateSlot := state.Slot()
stateEpoch := slots.ToEpoch(stateSlot)
currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
// Exit early if it's justified or too early to be justified.
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
return jc, fc
}

View File

@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
@@ -78,9 +78,9 @@ func TestStore_LongFork(t *testing.T) {
// Add an attestation to c, it is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
f.justifiedBalances = []uint64{100}
c := f.store.emptyNodeByRoot[[32]byte{'c'}]
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.node.slot))
driftGenesisTime(f, c.node.slot, 0)
c := f.store.nodeByRoot[[32]byte{'c'}]
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.slot))
driftGenesisTime(f, c.slot, 0)
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
d := f.store.emptyNodeByRoot[[32]byte{'d'}]
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.node.slot))
driftGenesisTime(f, d.node.slot, 0)
require.Equal(t, true, d.node.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.node.slot)))
d := f.store.nodeByRoot[[32]byte{'d'}]
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.slot))
driftGenesisTime(f, d.slot, 0)
require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.slot)))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
}
// Epoch 1 Epoch 2 Epoch 3
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
driftGenesisTime(f, 99, 0)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}
func TestStore_PullTips_Heuristics(t *testing.T) {
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 66, 0)
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 127, 0)
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
t.Run("Block from previous Epoch", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
f := setup(1, 1)
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
}

View File

@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(t.Context()))
assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
// we artificially pruned the justified root.
f.store.justifiedCheckpoint.Root = indexToHash(5)

View File

@@ -67,11 +67,13 @@ type FastGetter interface {
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HighestReceivedBlockDelay() primitives.Slot
IsCanonical(root [32]byte) bool
IsOptimistic(root [32]byte) (bool, error)
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedPayloadBlockHash() [32]byte
LastRoot(primitives.Epoch) [32]byte
NodeCount() int
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
ProposerBoost() [fieldparams.RootLength]byte
@@ -89,7 +91,7 @@ type FastGetter interface {
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [32]byte, [32]byte, [32]byte, [32]byte) ([][32]byte, error)
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(time.Time)

View File

@@ -121,6 +121,13 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
return ro.getter.HighestReceivedBlockRoot()
}
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.HighestReceivedBlockDelay()
}
// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.l.RLock()
@@ -156,6 +163,13 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
return ro.getter.Slot(root)
}
// LastRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.LastRoot(e)
}
// DependentRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
ro.l.RLock()

View File

@@ -30,6 +30,7 @@ const (
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
highestReceivedBlockDelayCalled
receivedBlocksLastEpochCalled
weightCalled
isOptimisticCalled
@@ -117,6 +118,11 @@ func TestROLocking(t *testing.T) {
call: highestReceivedBlockSlotCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
},
{
name: "highestReceivedBlockDelayCalled",
call: highestReceivedBlockDelayCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
},
{
name: "receivedBlocksLastEpochCalled",
call: receivedBlocksLastEpochCalled,
@@ -142,6 +148,11 @@ func TestROLocking(t *testing.T) {
call: slotCalled,
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
},
{
name: "lastRootCalled",
call: lastRootCalled,
cb: func(g FastGetter) { g.LastRoot(0) },
},
{
name: "targetRootForEpochCalled",
call: targetRootForEpochCalled,
@@ -254,6 +265,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
return [32]byte{}
}
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
return 0
}
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
return 0, nil
@@ -279,6 +295,11 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
return 0, nil
}
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
ro.calls = append(ro.calls, lastRootCalled)
return [32]byte{}
}
// DependentRoot implements FastGetter.
func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
ro.calls = append(ro.calls, dependentRootCalled)

View File

@@ -0,0 +1,95 @@
# Graffiti Version Info Implementation
## Summary
Add automatic EL+CL version info to block graffiti following [ethereum/execution-apis#517](https://github.com/ethereum/execution-apis/pull/517). Uses the [flexible standard](https://hackmd.io/@wmoBhF17RAOH2NZ5bNXJVg/BJX2c9gja) to pack client info into leftover space after user graffiti.
More details: https://github.com/ethereum/execution-apis/blob/main/src/engine/identification.md
## Implementation
### Core Component: GraffitiInfo Struct
Thread-safe struct holding version information:
```go
const clCode = "PR"

type GraffitiInfo struct {
	mu           sync.RWMutex
	userGraffiti string // From --graffiti flag (set once at startup)
	clCommit     string // From version.GetCommitPrefix() helper function
	elCode       string // From engine_getClientVersionV1
	elCommit     string // From engine_getClientVersionV1
}
```
### Flow
1. **Startup**: Parse flags, create GraffitiInfo with user graffiti and CL info.
2. **Wiring**: Pass the struct to both the execution service and the RPC validator server.
3. **Runtime**: An execution service goroutine periodically calls `engine_getClientVersionV1` and updates the EL fields (write path sketched below).
4. **Block Proposal**: The RPC validator server calls `GenerateGraffiti()` to get the formatted graffiti.
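Step 3 relies on a small locked write path. A minimal sketch, assuming the field names of the `GraffitiInfo` struct above (the real method may validate or truncate the commit string differently):
```go
// UpdateFromEngine stores the most recent EL identification reported by
// engine_getClientVersionV1. It takes the write lock so concurrent
// GenerateGraffiti calls during block proposal never observe a partial update.
func (g *GraffitiInfo) UpdateFromEngine(code, commit string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.elCode = code
	g.elCommit = commit
}
```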
### Flexible Graffiti Format
Packs as much client info as space allows (after user graffiti):
| Available Space | Format | Example |
|----------------|--------|---------|
| ≥12 bytes | `EL(2)+commit(4)+CL(2)+commit(4)+user` | `GE168dPR63afBob` |
| 8-11 bytes | `EL(2)+commit(2)+CL(2)+commit(2)+user` | `GE16PR63my node here` |
| 4-7 bytes | `EL(2)+CL(2)+user` | `GEPRthis is my graffiti msg` |
| 2-3 bytes | `EL(2)+user` | `GEalmost full graffiti message` |
| <2 bytes | user only | `full 32 byte user graffiti here` |
```go
func (g *GraffitiInfo) GenerateGraffiti() [32]byte {
	available := 32 - len(userGraffiti)
	// No EL info reported yet: drop the EL commit prefixes entirely.
	if elCode == "" {
		elCommit2, elCommit4 = "", ""
	}
	// Concatenation shorthand; the packed string fills the 32-byte graffiti field.
	switch {
	case available >= 12:
		return elCode + elCommit4 + clCode + clCommit4 + userGraffiti
	case available >= 8:
		return elCode + elCommit2 + clCode + clCommit2 + userGraffiti
	case available >= 4:
		return elCode + clCode + userGraffiti
	case available >= 2:
		return elCode + userGraffiti
	default:
		return userGraffiti
	}
}
```
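As a usage sketch of the packing above (assuming, as the pseudocode glosses over, that the packed string is left-aligned and zero-padded into the 32-byte graffiti field; imports `bytes` and `testing`):
```go
func TestGenerateGraffiti_FullVersionInfo(t *testing.T) {
	g := &GraffitiInfo{
		userGraffiti: "Bob",  // 3 bytes of user graffiti leaves 29 bytes free
		clCommit:     "63af", // CL commit prefix from version.GetCommitPrefix()
		elCode:       "GE",   // EL code and commit as reported by the engine API
		elCommit:     "168d",
	}
	got := g.GenerateGraffiti()
	// 29 >= 12, so the full EL(2)+commit(4)+CL(2)+commit(4)+user format applies.
	want := "GE168dPR63afBob"
	if trimmed := string(bytes.TrimRight(got[:], "\x00")); trimmed != want {
		t.Fatalf("graffiti = %q, want %q", trimmed, want)
	}
}
```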
### Update Logic
Single testable function in execution service:
```go
func (s *Service) updateGraffitiInfo(ctx context.Context) {
	versions, err := s.GetClientVersion(ctx)
	if err != nil {
		return // Keep the last good value.
	}
	// Exactly one connected EL is expected; skip ambiguous responses.
	if len(versions) == 1 {
		s.graffitiInfo.UpdateFromEngine(versions[0].Code, versions[0].Commit)
	}
}
```
A goroutine calls this when `slot % 8 == 4` (four times per epoch, away from slot boundaries); a sketch of that loop follows.
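A minimal sketch of that goroutine, assuming the service has a cancellable context and a per-slot channel (e.g. from Prysm's slot ticker); `pollClientVersion` is a hypothetical name and the real wiring lives in `beacon-chain/execution/service.go`:
```go
// pollClientVersion waits for slots and refreshes the EL identification
// four times per epoch, away from slot boundaries.
func (s *Service) pollClientVersion(ctx context.Context, slotC <-chan primitives.Slot) {
	for {
		select {
		case <-ctx.Done():
			return
		case slot := <-slotC:
			if slot%8 == 4 {
				s.updateGraffitiInfo(ctx)
			}
		}
	}
}
```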
### File Changes Required
**New:**
- `beacon-chain/execution/graffiti_info.go` - The struct and methods
- `beacon-chain/execution/graffiti_info_test.go` - Unit tests
- `runtime/version/version.go` - Add `GetCommitPrefix()` helper that extracts first 4 hex chars from the git commit injected via Bazel ldflags at build time
**Modified:**
- `beacon-chain/execution/service.go` - Add goroutine + updateGraffitiInfo()
- `beacon-chain/execution/engine_client.go` - Add GetClientVersion() method that does engine call
- `beacon-chain/rpc/.../validator/proposer.go` - Call GenerateGraffiti()
- `beacon-chain/node/node.go` - Wire GraffitiInfo to services
### Testing Strategy
- Unit test GraffitiInfo methods (priority logic, thread safety)
- Unit test updateGraffitiInfo() with mocked engine client

View File

@@ -785,9 +785,6 @@ func (b *BeaconNode) registerPOWChainService() error {
return err
}
// Create GraffitiInfo for client version tracking in block graffiti
graffitiInfo := execution.NewGraffitiInfo()
// skipcq: CRT-D0001
opts := append(
b.serviceFlagOpts.executionChainFlagOpts,
@@ -800,7 +797,6 @@ func (b *BeaconNode) registerPOWChainService() error {
execution.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
execution.WithJwtId(b.cliCtx.String(flags.JwtId.Name)),
execution.WithVerifierWaiter(b.verifyInitWaiter),
execution.WithGraffitiInfo(graffitiInfo),
)
web3Service, err := execution.NewService(b.ctx, opts...)
if err != nil {
@@ -1007,7 +1003,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,
GraffitiInfo: web3Service.GraffitiInfo(),
})
return b.services.RegisterService(rpcService)

View File

@@ -25,8 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
PayloadAttestationMessageTopicFormat: func() proto.Message { return &ethpb.PayloadAttestationMessage{} },
ExecutionPayloadEnvelopeTopicFormat: func() proto.Message { return &ethpb.SignedExecutionPayloadEnvelope{} },
}
// GossipTopicMappings is a function to return the assigned data type
@@ -146,7 +144,4 @@ func init() {
// Specially handle Fulu objects.
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockFulu]()] = BlockSubnetTopicFormat
// Payload attestation messages.
GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()] = PayloadAttestationMessageTopicFormat
}

View File

@@ -138,9 +138,6 @@ func connect(a, b host.Host) error {
func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
require.NoError(p.t, err)
p.t.Cleanup(func() {
require.NoError(p.t, h.Close())
})
if err := connect(h, p.BHost); err != nil {
p.t.Fatalf("Failed to connect two peers for RPC: %v", err)
}
@@ -172,9 +169,6 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
require.NoError(p.t, err)
p.t.Cleanup(func() {
require.NoError(p.t, h.Close())
})
ps, err := pubsub.NewFloodSub(context.Background(), h,
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),

View File

@@ -46,10 +46,6 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipPayloadAttestationMessageMessage is the name for the payload attestation message type.
GossipPayloadAttestationMessageMessage = "payload_attestation_message"
// GossipExecutionPayloadEnvelopeMessage is the name for the execution payload envelope message type.
GossipExecutionPayloadEnvelopeMessage = "execution_payload_envelope"
// Topic Formats
//
@@ -79,10 +75,6 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// PayloadAttestationMessageTopicFormat is the topic format for payload attestation messages.
PayloadAttestationMessageTopicFormat = GossipProtocolAndDigest + GossipPayloadAttestationMessageMessage
// ExecutionPayloadEnvelopeTopicFormat is the topic format for execution payload envelopes.
ExecutionPayloadEnvelopeTopicFormat = GossipProtocolAndDigest + GossipExecutionPayloadEnvelopeMessage
)
// topic is a struct representing a single gossipsub topic.
@@ -149,7 +141,7 @@ func (s *Service) allTopics() []topic {
cfg := params.BeaconConfig()
// bellatrix: no special topics; electra: blobs topics handled all together
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
deneb, fulu, gloas, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.GloasForkEpoch, cfg.FarFutureEpoch
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
// Templates are starter topics - they have a placeholder digest and the subnet is set to the maximum value
// for the subnet (see how this is used in allSubnetsBelow). These are not directly returned by the method,
// they are copied and modified for each digest where they apply based on the start and end epochs.
@@ -166,8 +158,6 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(gloas, future, empty, GossipPayloadAttestationMessageMessage),
newTopic(gloas, future, empty, GossipExecutionPayloadEnvelopeMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

View File

@@ -86,7 +86,6 @@ func TestGetSpec(t *testing.T) {
config.GloasForkEpoch = 110
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.BuilderWithdrawalPrefixByte = byte('e')
config.GenesisDelay = 24
config.SecondsPerSlot = 25
config.SlotDurationMilliseconds = 120

View File

@@ -89,13 +89,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
// Set slot, graffiti, randao reveal, and parent root.
sBlk.SetSlot(req.Slot)
// Generate graffiti with client version info using flexible standard
if vs.GraffitiInfo != nil {
graffiti := vs.GraffitiInfo.GenerateGraffiti(req.Graffiti)
sBlk.SetGraffiti(graffiti[:])
} else {
sBlk.SetGraffiti(req.Graffiti)
}
sBlk.SetGraffiti(req.Graffiti)
sBlk.SetRandaoReveal(req.RandaoReveal)
sBlk.SetParentRoot(parentRoot[:])

View File

@@ -83,7 +83,6 @@ type Server struct {
ClockWaiter startup.ClockWaiter
CoreService *core.Service
AttestationStateFetcher blockchain.AttestationStateFetcher
GraffitiInfo *execution.GraffitiInfo
}
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.

View File

@@ -125,7 +125,6 @@ type Config struct {
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
LCStore *lightClient.Store
GraffitiInfo *execution.GraffitiInfo
}
// NewService instantiates a new RPC service instance that will
@@ -257,7 +256,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
TrackedValidatorsCache: s.cfg.TrackedValidatorsCache,
PayloadIDCache: s.cfg.PayloadIDCache,
AttestationStateFetcher: s.cfg.AttestationReceiver,
GraffitiInfo: s.cfg.GraffitiInfo,
}
s.validatorServer = validatorServer
nodeServer := &nodev1alpha1.Server{

View File

@@ -1,55 +1,24 @@
package state
import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
type writeOnlyGloasFields interface {
// Bids.
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
// Builder pending payments / withdrawals.
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
ClearBuilderPendingPayment(index primitives.Slot) error
QueueBuilderPayment() error
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
// Execution payload availability.
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
// Misc.
SetLatestBlockHash(hash [32]byte) error
SetExecutionPayloadAvailability(index primitives.Slot, available bool) error
// Builders.
IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error
AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error
UpdatePendingPaymentWeight(att ethpb.Att, indices []uint64, participatedFlags map[uint8]bool) error
}
type readOnlyGloasFields interface {
// Bids.
LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error)
// Builder pending payments / withdrawals.
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error)
// Misc.
LatestBlockHash() ([32]byte, error)
// Builders.
Builder(index primitives.BuilderIndex) (*ethpb.Builder, error)
BuilderPubkey(primitives.BuilderIndex) ([48]byte, error)
BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool)
IsActiveBuilder(primitives.BuilderIndex) (bool, error)
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
IsAttestationSameSlot(blockRoot [32]byte, slot primitives.Slot) (bool, error)
BuilderPendingPayment(index uint64) (*ethpb.BuilderPendingPayment, error)
ExecutionPayloadAvailability(slot primitives.Slot) (uint64, error)
LatestBlockHash() ([32]byte, error)
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
}

View File

@@ -1,19 +1,13 @@
package state_native
import (
"bytes"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
)
// LatestBlockHash returns the hash of the latest execution block.
@@ -32,45 +26,6 @@ func (b *BeaconState) LatestBlockHash() ([32]byte, error) {
return [32]byte(b.latestBlockHash), nil
}
// IsAttestationSameSlot checks if the attestation is for the same slot as the block root in the state.
// Spec v1.7.0-alpha pseudocode:
//
// is_attestation_same_slot(state, data):
// if data.slot == 0:
// return True
//
// blockroot = data.beacon_block_root
// slot_blockroot = get_block_root_at_slot(state, data.slot)
// prev_blockroot = get_block_root_at_slot(state, Slot(data.slot - 1))
//
// return blockroot == slot_blockroot and blockroot != prev_blockroot
func (b *BeaconState) IsAttestationSameSlot(blockRoot [32]byte, slot primitives.Slot) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("IsAttestationSameSlot", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
if slot == 0 {
return true, nil
}
blockRootAtSlot, err := helpers.BlockRootAtSlot(b, slot)
if err != nil {
return false, errors.Wrapf(err, "block root at slot %d", slot)
}
matchingBlockRoot := bytes.Equal(blockRoot[:], blockRootAtSlot)
blockRootAtPrevSlot, err := helpers.BlockRootAtSlot(b, slot-1)
if err != nil {
return false, errors.Wrapf(err, "block root at slot %d", slot-1)
}
matchingPrevBlockRoot := bytes.Equal(blockRoot[:], blockRootAtPrevSlot)
return matchingBlockRoot && !matchingPrevBlockRoot, nil
}
// BuilderPubkey returns the builder pubkey at the provided index.
func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
if b.version < version.Gloas {
@@ -201,116 +156,3 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
return b.builderPendingPaymentsVal(), nil
}
// BuilderPendingPayment returns the builder pending payment for the given index.
func (b *BeaconState) BuilderPendingPayment(index uint64) (*ethpb.BuilderPendingPayment, error) {
if b.version < version.Gloas {
return nil, errNotSupported("BuilderPendingPayment", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
if index >= uint64(len(b.builderPendingPayments)) {
return nil, fmt.Errorf("builder pending payment index %d out of range (len=%d)", index, len(b.builderPendingPayments))
}
return ethpb.CopyBuilderPendingPayment(b.builderPendingPayments[index]), nil
}
// LatestExecutionPayloadBid returns the cached latest execution payload bid for Gloas.
func (b *BeaconState) LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error) {
if b.version < version.Gloas {
return nil, errNotSupported("LatestExecutionPayloadBid", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
if b.latestExecutionPayloadBid == nil {
return nil, nil
}
return blocks.WrappedROExecutionPayloadBid(b.latestExecutionPayloadBid.Copy())
}
// WithdrawalsMatchPayloadExpected returns true if the given withdrawals match the state's
// payload_expected_withdrawals.
func (b *BeaconState) WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("WithdrawalsMatchPayloadExpected", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
return withdrawalsEqual(withdrawals, b.payloadExpectedWithdrawals), nil
}
func withdrawalsEqual(a, b []*enginev1.Withdrawal) bool {
if len(a) != len(b) {
return false
}
for i := range a {
wa := a[i]
wb := b[i]
if wa.Index != wb.Index ||
wa.ValidatorIndex != wb.ValidatorIndex ||
wa.Amount != wb.Amount ||
!bytes.Equal(wa.Address, wb.Address) {
return false
}
}
return true
}
// ExecutionPayloadAvailability returns the execution payload availability bit for the given slot.
func (b *BeaconState) ExecutionPayloadAvailability(slot primitives.Slot) (uint64, error) {
if b.version < version.Gloas {
return 0, errNotSupported("ExecutionPayloadAvailability", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
slotIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
byteIndex := slotIndex / 8
bitIndex := slotIndex % 8
bit := (b.executionPayloadAvailability[byteIndex] >> bitIndex) & 1
return uint64(bit), nil
}
// Builder returns the builder at the given index.
func (b *BeaconState) Builder(index primitives.BuilderIndex) (*ethpb.Builder, error) {
b.lock.RLock()
defer b.lock.RUnlock()
if b.builders == nil {
return nil, nil
}
if uint64(index) >= uint64(len(b.builders)) {
return nil, fmt.Errorf("builder index %d out of bounds", index)
}
if b.builders[index] == nil {
return nil, nil
}
return ethpb.CopyBuilder(b.builders[index]), nil
}
// BuilderIndexByPubkey returns the builder index for the given pubkey, if present.
func (b *BeaconState) BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool) {
b.lock.RLock()
defer b.lock.RUnlock()
for i, builder := range b.builders {
if builder == nil {
continue
}
if bytes.Equal(builder.Pubkey, pubkey[:]) {
return primitives.BuilderIndex(i), true
}
}
return 0, false
}

View File

@@ -5,10 +5,8 @@ import (
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -46,103 +44,6 @@ func TestLatestBlockHash(t *testing.T) {
})
}
func TestLatestExecutionPayloadBid(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
native, ok := stIface.(*state_native.BeaconState)
require.Equal(t, true, ok)
_, err := native.LatestExecutionPayloadBid()
require.ErrorContains(t, "is not supported", err)
})
}
func TestIsAttestationSameSlot(t *testing.T) {
buildStateWithBlockRoots := func(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for slot, root := range roots {
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
}
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Slot: stateSlot,
BlockRoots: blockRoots,
})
require.NoError(t, err)
return stIface.(*state_native.BeaconState)
}
rootA := bytes.Repeat([]byte{0xAA}, 32)
rootB := bytes.Repeat([]byte{0xBB}, 32)
rootC := bytes.Repeat([]byte{0xCC}, 32)
tests := []struct {
name string
stateSlot primitives.Slot
slot primitives.Slot
blockRoot []byte
roots map[primitives.Slot][]byte
want bool
}{
{
name: "slot zero always true",
stateSlot: 1,
slot: 0,
blockRoot: rootA,
roots: map[primitives.Slot][]byte{},
want: true,
},
{
name: "matching current different previous",
stateSlot: 6,
slot: 4,
blockRoot: rootA,
roots: map[primitives.Slot][]byte{
4: rootA,
3: rootB,
},
want: true,
},
{
name: "matching current same previous",
stateSlot: 6,
slot: 4,
blockRoot: rootA,
roots: map[primitives.Slot][]byte{
4: rootA,
3: rootA,
},
want: false,
},
{
name: "non matching current",
stateSlot: 6,
slot: 4,
blockRoot: rootC,
roots: map[primitives.Slot][]byte{
4: rootA,
3: rootB,
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st := buildStateWithBlockRoots(t, tt.stateSlot, tt.roots)
var rootArr [32]byte
copy(rootArr[:], tt.blockRoot)
got, err := st.IsAttestationSameSlot(rootArr, tt.slot)
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}
func TestBuilderPubkey(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
@@ -265,208 +166,3 @@ func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
_, err = st.BuilderPendingPayments()
require.ErrorContains(t, "BuilderPendingPayments", err)
}
func TestWithdrawalsMatchPayloadExpected(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
native, ok := stIface.(*state_native.BeaconState)
require.Equal(t, true, ok)
_, err := native.WithdrawalsMatchPayloadExpected(nil)
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns true when roots match", func(t *testing.T) {
withdrawals := []*enginev1.Withdrawal{
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
}
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
PayloadExpectedWithdrawals: withdrawals,
})
require.NoError(t, err)
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
require.NoError(t, err)
require.Equal(t, true, ok)
})
t.Run("returns false when roots do not match", func(t *testing.T) {
expected := []*enginev1.Withdrawal{
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
}
actual := []*enginev1.Withdrawal{
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 11},
}
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
PayloadExpectedWithdrawals: expected,
})
require.NoError(t, err)
ok, err := st.WithdrawalsMatchPayloadExpected(actual)
require.NoError(t, err)
require.Equal(t, false, ok)
})
}
func TestBuilder(t *testing.T) {
t.Run("nil builders returns nil", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: nil,
})
require.NoError(t, err)
got, err := st.Builder(0)
require.NoError(t, err)
require.Equal(t, (*ethpb.Builder)(nil), got)
})
t.Run("out of bounds returns error", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{{}},
})
require.NoError(t, err)
_, err = st.Builder(1)
require.ErrorContains(t, "out of bounds", err)
})
t.Run("returns copy", func(t *testing.T) {
pubkey := bytes.Repeat([]byte{0xAA}, fieldparams.BLSPubkeyLength)
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Pubkey: pubkey,
Balance: 42,
DepositEpoch: 3,
WithdrawableEpoch: 4,
},
},
})
require.NoError(t, err)
got1, err := st.Builder(0)
require.NoError(t, err)
require.NotEqual(t, (*ethpb.Builder)(nil), got1)
require.Equal(t, primitives.Gwei(42), got1.Balance)
require.DeepEqual(t, pubkey, got1.Pubkey)
// Mutate returned builder; state should be unchanged.
got1.Pubkey[0] = 0xFF
got2, err := st.Builder(0)
require.NoError(t, err)
require.Equal(t, byte(0xAA), got2.Pubkey[0])
})
}
func TestBuilderIndexByPubkey(t *testing.T) {
t.Run("not found returns false", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{Pubkey: bytes.Repeat([]byte{0x11}, fieldparams.BLSPubkeyLength)},
},
})
require.NoError(t, err)
var pk [fieldparams.BLSPubkeyLength]byte
copy(pk[:], bytes.Repeat([]byte{0x22}, fieldparams.BLSPubkeyLength))
idx, ok := st.BuilderIndexByPubkey(pk)
require.Equal(t, false, ok)
require.Equal(t, primitives.BuilderIndex(0), idx)
})
t.Run("skips nil entries and finds match", func(t *testing.T) {
wantIdx := primitives.BuilderIndex(1)
wantPkBytes := bytes.Repeat([]byte{0xAB}, fieldparams.BLSPubkeyLength)
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
nil,
{Pubkey: wantPkBytes},
},
})
require.NoError(t, err)
var pk [fieldparams.BLSPubkeyLength]byte
copy(pk[:], wantPkBytes)
idx, ok := st.BuilderIndexByPubkey(pk)
require.Equal(t, true, ok)
require.Equal(t, wantIdx, idx)
})
}
func TestBuilderPendingPayment(t *testing.T) {
t.Run("returns copy", func(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
payments := make([]*ethpb.BuilderPendingPayment, 2*slotsPerEpoch)
target := uint64(slotsPerEpoch + 1)
payments[target] = &ethpb.BuilderPendingPayment{Weight: 10}
st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
BuilderPendingPayments: payments,
})
require.NoError(t, err)
payment, err := st.BuilderPendingPayment(target)
require.NoError(t, err)
// mutate returned copy
payment.Weight = 99
original, err := st.BuilderPendingPayment(target)
require.NoError(t, err)
require.Equal(t, uint64(10), uint64(original.Weight))
})
t.Run("unsupported version", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
_, err = st.BuilderPendingPayment(0)
require.ErrorContains(t, "BuilderPendingPayment", err)
})
t.Run("out of range", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
BuilderPendingPayments: []*ethpb.BuilderPendingPayment{},
})
require.NoError(t, err)
_, err = stIface.BuilderPendingPayment(0)
require.ErrorContains(t, "out of range", err)
})
}
func TestExecutionPayloadAvailability(t *testing.T) {
t.Run("unsupported version", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
_, err = st.ExecutionPayloadAvailability(0)
require.ErrorContains(t, "ExecutionPayloadAvailability", err)
})
t.Run("reads expected bit", func(t *testing.T) {
// Ensure the backing slice is large enough.
availability := make([]byte, params.BeaconConfig().SlotsPerHistoricalRoot/8)
// Pick a slot and set its corresponding bit.
slot := primitives.Slot(9) // byteIndex=1, bitIndex=1
availability[1] = 0b00000010
stIface, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
ExecutionPayloadAvailability: availability,
})
require.NoError(t, err)
bit, err := stIface.ExecutionPayloadAvailability(slot)
require.NoError(t, err)
require.Equal(t, uint64(1), bit)
otherBit, err := stIface.ExecutionPayloadAvailability(8)
require.NoError(t, err)
require.Equal(t, uint64(0), otherBit)
})
}

View File

@@ -5,14 +5,11 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -85,20 +82,20 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
parentBlockRoot := h.ParentBlockRoot()
blockHash := h.BlockHash()
randao := h.PrevRandao()
blobKzgCommitments := h.BlobKzgCommitments()
blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
feeRecipient := h.FeeRecipient()
b.latestExecutionPayloadBid = &ethpb.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitments: blobKzgCommitments,
FeeRecipient: feeRecipient[:],
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
FeeRecipient: feeRecipient[:],
}
b.markFieldAsDirty(types.LatestExecutionPayloadBid)
@@ -124,41 +121,6 @@ func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
return nil
}
// QueueBuilderPayment implements the builder payment queuing logic for Gloas.
// Spec v1.7.0-alpha.0 (pseudocode):
//
//	payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
//	amount = payment.withdrawal.amount
//	if amount > 0:
//	    state.builder_pending_withdrawals.append(payment.withdrawal)
//	state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = BuilderPendingPayment()
func (b *BeaconState) QueueBuilderPayment() error {
if b.version < version.Gloas {
return errNotSupported("QueueBuilderPayment", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
slot := b.slot
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
if uint64(paymentIndex) >= uint64(len(b.builderPendingPayments)) {
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentIndex, len(b.builderPendingPayments))
}
payment := b.builderPendingPayments[paymentIndex]
if payment != nil && payment.Withdrawal != nil && payment.Withdrawal.Amount > 0 {
b.builderPendingWithdrawals = append(b.builderPendingWithdrawals, ethpb.CopyBuilderPendingWithdrawal(payment.Withdrawal))
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
}
b.builderPendingPayments[paymentIndex] = emptyBuilderPendingPayment
b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}
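// Worked example (illustrative only, not part of this change; assumes mainnet
// SLOTS_PER_EPOCH = 32): with state.slot = 70, paymentIndex = 32 + (70 % 32) = 38,
// so QueueBuilderPayment drains the second half of the two-epoch
// builder_pending_payments queue, which always holds the current epoch's payments.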
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
if b.version < version.Gloas {
@@ -199,249 +161,3 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
// SetLatestBlockHash sets the latest execution block hash.
func (b *BeaconState) SetLatestBlockHash(hash [32]byte) error {
if b.version < version.Gloas {
return errNotSupported("SetLatestBlockHash", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.latestBlockHash = hash[:]
b.markFieldAsDirty(types.LatestBlockHash)
return nil
}
// SetExecutionPayloadAvailability sets the execution payload availability bit for a specific slot.
func (b *BeaconState) SetExecutionPayloadAvailability(index primitives.Slot, available bool) error {
if b.version < version.Gloas {
return errNotSupported("SetExecutionPayloadAvailability", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
bitIndex := index % params.BeaconConfig().SlotsPerHistoricalRoot
byteIndex := bitIndex / 8
bitPosition := bitIndex % 8
if uint64(byteIndex) >= uint64(len(b.executionPayloadAvailability)) {
return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", bitIndex, byteIndex, len(b.executionPayloadAvailability))
}
// Set or clear the bit
if available {
b.executionPayloadAvailability[byteIndex] |= 1 << bitPosition
} else {
b.executionPayloadAvailability[byteIndex] &^= 1 << bitPosition
}
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
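// Illustrative sketch (hypothetical helper, not part of this change): the slot-to-bit
// mapping used by SetExecutionPayloadAvailability, written out on plain uint64s. For
// example, with SLOTS_PER_HISTORICAL_ROOT = 8192, slot 8201 wraps to bit index 9,
// i.e. byte 1, bit position 1.
func slotToAvailabilityBit(slot, slotsPerHistoricalRoot uint64) (byteIndex, bitPosition uint64) {
	bitIndex := slot % slotsPerHistoricalRoot // wrap the slot into the historical window
	return bitIndex / 8, bitIndex % 8         // byte offset and bit within that byte
}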
// IncreaseBuilderBalance increases the balance of the builder at the given index.
func (b *BeaconState) IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error {
if b.version < version.Gloas {
return errNotSupported("IncreaseBuilderBalance", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
if b.builders == nil || uint64(index) >= uint64(len(b.builders)) {
return fmt.Errorf("builder index %d out of bounds", index)
}
if b.builders[index] == nil {
return fmt.Errorf("builder at index %d is nil", index)
}
builders := b.builders
if b.sharedFieldReferences[types.Builders].Refs() > 1 {
builders = make([]*ethpb.Builder, len(b.builders))
copy(builders, b.builders)
b.sharedFieldReferences[types.Builders].MinusRef()
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
}
builder := ethpb.CopyBuilder(builders[index])
builder.Balance += primitives.Gwei(amount)
builders[index] = builder
b.builders = builders
b.markFieldAsDirty(types.Builders)
return nil
}
// AddBuilderFromDeposit creates or replaces a builder entry derived from a deposit.
func (b *BeaconState) AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error {
if b.version < version.Gloas {
return errNotSupported("AddBuilderFromDeposit", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
currentEpoch := slots.ToEpoch(b.slot)
index := b.builderInsertionIndex(currentEpoch)
builder := &ethpb.Builder{
Pubkey: bytesutil.SafeCopyBytes(pubkey[:]),
Version: []byte{withdrawalCredentials[0]},
ExecutionAddress: bytesutil.SafeCopyBytes(withdrawalCredentials[12:]),
Balance: primitives.Gwei(amount),
DepositEpoch: currentEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
}
builders := b.builders
if b.sharedFieldReferences[types.Builders].Refs() > 1 {
builders = make([]*ethpb.Builder, len(b.builders))
copy(builders, b.builders)
b.sharedFieldReferences[types.Builders].MinusRef()
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
}
if index < primitives.BuilderIndex(len(builders)) {
builders[index] = builder
} else {
gap := index - primitives.BuilderIndex(len(builders)) + 1
builders = append(builders, make([]*ethpb.Builder, gap)...)
builders[index] = builder
}
b.builders = builders
b.markFieldAsDirty(types.Builders)
return nil
}
func (b *BeaconState) builderInsertionIndex(currentEpoch primitives.Epoch) primitives.BuilderIndex {
for i, builder := range b.builders {
if builder.WithdrawableEpoch <= currentEpoch && builder.Balance == 0 {
return primitives.BuilderIndex(i)
}
}
return primitives.BuilderIndex(len(b.builders))
}
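// Reuse rule in short (illustrative, not part of this change): a deposit reuses the
// first builder slot that is both withdrawable (WithdrawableEpoch <= current epoch) and
// fully drained (Balance == 0); only when no such slot exists does AddBuilderFromDeposit
// append. E.g. at epoch 1, builders = [{WithdrawableEpoch: 0, Balance: 0}] yields index 0
// (overwrite), while [{WithdrawableEpoch: FAR_FUTURE_EPOCH, Balance: 1}] yields index 1 (append).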
// UpdatePendingPaymentWeight updates the builder pending payment weight based on attestation participation.
//
// This is a no-op for pre-Gloas forks.
//
// Spec v1.7.0-alpha pseudocode:
//
// if data.target.epoch == get_current_epoch(state):
// current_epoch_target = True
// epoch_participation = state.current_epoch_participation
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH]
// else:
// current_epoch_target = False
// epoch_participation = state.previous_epoch_participation
// payment = state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH]
//
// proposer_reward_numerator = 0
// for index in get_attesting_indices(state, attestation):
// will_set_new_flag = False
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
// proposer_reward_numerator += get_base_reward(state, index) * weight
// # [New in Gloas:EIP7732]
// will_set_new_flag = True
// if (
// will_set_new_flag
// and is_attestation_same_slot(state, data)
// and payment.withdrawal.amount > 0
// ):
// payment.weight += state.validators[index].effective_balance
// if current_epoch_target:
// state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH] = payment
// else:
// state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH] = payment
func (b *BeaconState) UpdatePendingPaymentWeight(att ethpb.Att, indices []uint64, participatedFlags map[uint8]bool) error {
var (
paymentSlot primitives.Slot
currentPayment *ethpb.BuilderPendingPayment
weight primitives.Gwei
)
early, err := func() (bool, error) {
b.lock.RLock()
defer b.lock.RUnlock()
if b.version < version.Gloas {
return true, nil
}
data := att.GetData()
var beaconBlockRoot [32]byte
copy(beaconBlockRoot[:], data.BeaconBlockRoot)
sameSlot, err := b.IsAttestationSameSlot(beaconBlockRoot, data.Slot)
if err != nil {
return false, err
}
if !sameSlot {
return true, nil
}
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
var epochParticipation []byte
if data.Target != nil && data.Target.Epoch == slots.ToEpoch(b.slot) {
paymentSlot = slotsPerEpoch + (data.Slot % slotsPerEpoch)
epochParticipation = b.currentEpochParticipation
} else {
paymentSlot = data.Slot % slotsPerEpoch
epochParticipation = b.previousEpochParticipation
}
if uint64(paymentSlot) >= uint64(len(b.builderPendingPayments)) {
return false, fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentSlot, len(b.builderPendingPayments))
}
currentPayment = b.builderPendingPayments[paymentSlot]
if currentPayment.Withdrawal.Amount == 0 {
return true, nil
}
cfg := params.BeaconConfig()
flagIndices := []uint8{cfg.TimelySourceFlagIndex, cfg.TimelyTargetFlagIndex, cfg.TimelyHeadFlagIndex}
for _, idx := range indices {
if idx >= uint64(len(epochParticipation)) {
return false, fmt.Errorf("index %d exceeds participation length %d", idx, len(epochParticipation))
}
participation := epochParticipation[idx]
for _, f := range flagIndices {
if !participatedFlags[f] {
continue
}
if participation&(1<<f) == 0 {
v, err := b.validatorAtIndexReadOnly(primitives.ValidatorIndex(idx))
if err != nil {
return false, fmt.Errorf("validator at index %d: %w", idx, err)
}
weight += primitives.Gwei(v.EffectiveBalance())
break
}
}
}
return false, nil
}()
if err != nil {
return err
}
if early || weight == 0 {
return nil
}
b.lock.Lock()
defer b.lock.Unlock()
newPayment := ethpb.CopyBuilderPendingPayment(currentPayment)
newPayment.Weight += weight
b.builderPendingPayments[paymentSlot] = newPayment
b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}
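// Worked example (illustrative, assuming mainnet MIN_ACTIVATION_BALANCE = 32 ETH =
// 32_000_000_000 Gwei): for a same-slot attestation whose pending payment has a non-zero
// withdrawal amount, each attesting index that sets at least one previously unset timely
// flag contributes its effective balance once, so a single fresh attester moves
// payment.Weight from 0 to 32_000_000_000, while attesters with all flags already set add nothing.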

View File

@@ -11,21 +11,20 @@ import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
type testExecutionPayloadBid struct {
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitments [][]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitmentsRoot [32]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
}
func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
@@ -41,12 +40,9 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
return t.executionPayment
}
func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
func (t testExecutionPayloadBid) BlobKzgCommitmentCount() uint64 {
return uint64(len(t.blobKzgCommitments))
}
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func TestSetExecutionPayloadBid(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
@@ -61,7 +57,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
blockHash = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
prevRandao = [32]byte(bytes.Repeat([]byte{0x11}, 32))
blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
blobRoot = [32]byte(bytes.Repeat([]byte{0x22}, 32))
feeRecipient [20]byte
)
copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
@@ -70,17 +66,17 @@ func TestSetExecutionPayloadBid(t *testing.T) {
dirtyFields: make(map[types.FieldIndex]bool),
}
bid := testExecutionPayloadBid{
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitments: blobCommitments,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitmentsRoot: blobRoot,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
}
require.NoError(t, st.SetExecutionPayloadBid(bid))
@@ -90,7 +86,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
@@ -167,7 +163,7 @@ func TestClearBuilderPendingPayment(t *testing.T) {
}
require.NoError(t, st.ClearBuilderPendingPayment(1))
require.DeepEqual(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
})
@@ -185,173 +181,6 @@ func TestClearBuilderPendingPayment(t *testing.T) {
})
}
func TestQueueBuilderPayment(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.QueueBuilderPayment()
require.ErrorContains(t, "is not supported", err)
})
t.Run("appends withdrawal, clears payment, and marks dirty", func(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
slot := primitives.Slot(3)
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
st := &BeaconState{
version: version.Gloas,
slot: slot,
dirtyFields: make(map[types.FieldIndex]bool),
rebuildTrie: make(map[types.FieldIndex]bool),
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
}
st.builderPendingPayments[paymentIndex] = &ethpb.BuilderPendingPayment{
Weight: 1,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
Amount: 99,
BuilderIndex: 1,
},
}
require.NoError(t, st.QueueBuilderPayment())
require.DeepEqual(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.DeepEqual(t, bytes.Repeat([]byte{0xAB}, 20), st.builderPendingWithdrawals[0].FeeRecipient)
require.Equal(t, primitives.Gwei(99), st.builderPendingWithdrawals[0].Amount)
// Ensure copied withdrawal is not aliased.
st.builderPendingPayments[paymentIndex].Withdrawal.FeeRecipient[0] = 0x01
require.Equal(t, byte(0xAB), st.builderPendingWithdrawals[0].FeeRecipient[0])
})
t.Run("zero amount does not append withdrawal", func(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
slot := primitives.Slot(3)
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
st := &BeaconState{
version: version.Gloas,
slot: slot,
dirtyFields: make(map[types.FieldIndex]bool),
rebuildTrie: make(map[types.FieldIndex]bool),
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
}
st.builderPendingPayments[paymentIndex] = &ethpb.BuilderPendingPayment{
Weight: 1,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
Amount: 0,
BuilderIndex: 1,
},
}
require.NoError(t, st.QueueBuilderPayment())
require.DeepEqual(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
require.Equal(t, 0, len(st.builderPendingWithdrawals))
})
}
func TestUpdatePendingPaymentWeight(t *testing.T) {
cfg := params.BeaconConfig()
slotsPerEpoch := cfg.SlotsPerEpoch
slot := primitives.Slot(4)
stateSlot := slot + 1
stateEpoch := slots.ToEpoch(stateSlot)
rootA := bytes.Repeat([]byte{0xAA}, 32)
rootB := bytes.Repeat([]byte{0xBB}, 32)
tests := []struct {
name string
targetEpoch primitives.Epoch
blockRoot []byte
initialAmount primitives.Gwei
initialWeight primitives.Gwei
wantWeight primitives.Gwei
}{
{
name: "same slot current epoch adds weight",
targetEpoch: stateEpoch,
blockRoot: rootA,
initialAmount: 1,
initialWeight: 0,
wantWeight: primitives.Gwei(cfg.MinActivationBalance),
},
{
name: "same slot zero amount no weight change",
targetEpoch: stateEpoch,
blockRoot: rootA,
initialAmount: 0,
initialWeight: 5,
wantWeight: 5,
},
{
name: "non matching block root no change",
targetEpoch: stateEpoch,
blockRoot: rootB,
initialAmount: 1,
initialWeight: 7,
wantWeight: 7,
},
{
name: "previous epoch target uses earlier slot",
targetEpoch: stateEpoch - 1,
blockRoot: rootA,
initialAmount: 1,
initialWeight: 0,
wantWeight: primitives.Gwei(cfg.MinActivationBalance),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var paymentIdx int
if tt.targetEpoch == stateEpoch {
paymentIdx = int(slotsPerEpoch + (slot % slotsPerEpoch))
} else {
paymentIdx = int(slot % slotsPerEpoch)
}
state := buildGloasStateForPaymentWeightTest(t, stateSlot, paymentIdx, tt.initialAmount, tt.initialWeight, map[primitives.Slot][]byte{
slot: tt.blockRoot,
slot - 1: rootB,
})
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: slot,
CommitteeIndex: 0,
BeaconBlockRoot: tt.blockRoot,
Source: &ethpb.Checkpoint{},
Target: &ethpb.Checkpoint{
Epoch: tt.targetEpoch,
},
},
}
participatedFlags := map[uint8]bool{
cfg.TimelySourceFlagIndex: true,
cfg.TimelyTargetFlagIndex: true,
cfg.TimelyHeadFlagIndex: true,
}
indices := []uint64{0}
require.NoError(t, state.UpdatePendingPaymentWeight(att, indices, participatedFlags))
payment, err := state.BuilderPendingPayment(uint64(paymentIdx))
require.NoError(t, err)
require.Equal(t, tt.wantWeight, payment.Weight)
})
}
}
func TestRotateBuilderPendingPayments(t *testing.T) {
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
@@ -489,79 +318,6 @@ func TestUpdateExecutionPayloadAvailabilityAtIndex_OutOfRange(t *testing.T) {
}
}
func buildGloasStateForPaymentWeightTest(
t *testing.T,
stateSlot primitives.Slot,
paymentIdx int,
amount primitives.Gwei,
weight primitives.Gwei,
roots map[primitives.Slot][]byte,
) *BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for slot, root := range roots {
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
}
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range stateRoots {
stateRoots[i] = bytes.Repeat([]byte{0x44}, 32)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = bytes.Repeat([]byte{0x55}, 32)
}
validator := &ethpb.Validator{
PublicKey: bytes.Repeat([]byte{0x01}, 48),
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x02}, 31)...),
EffectiveBalance: cfg.MinActivationBalance,
}
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
for i := range payments {
payments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
payments[paymentIdx] = &ethpb.BuilderPendingPayment{
Weight: weight,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: amount,
},
}
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
stProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x33}, 32),
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
ExecutionPayloadAvailability: execPayloadAvailability,
Validators: []*ethpb.Validator{validator},
Balances: []uint64{cfg.MinActivationBalance},
CurrentEpochParticipation: []byte{0},
PreviousEpochParticipation: []byte{0},
BuilderPendingPayments: payments,
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x66}, 4),
PreviousVersion: bytes.Repeat([]byte{0x66}, 4),
Epoch: 0,
},
}
statePb, err := InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return statePb.(*BeaconState)
}
func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconState {
t.Helper()
@@ -572,241 +328,3 @@ func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconSta
return st.(*BeaconState)
}
func TestSetLatestBlockHash(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
var hash [32]byte
st := &BeaconState{version: version.Fulu}
err := st.SetLatestBlockHash(hash)
require.ErrorContains(t, "SetLatestBlockHash", err)
})
var hash [32]byte
copy(hash[:], []byte("latest-block-hash"))
state := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
}
require.NoError(t, state.SetLatestBlockHash(hash))
require.Equal(t, true, state.dirtyFields[types.LatestBlockHash])
require.DeepEqual(t, hash[:], state.latestBlockHash)
}
func TestSetExecutionPayloadAvailability(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetExecutionPayloadAvailability(0, true)
require.ErrorContains(t, "SetExecutionPayloadAvailability", err)
})
state := &BeaconState{
version: version.Gloas,
executionPayloadAvailability: make([]byte, params.BeaconConfig().SlotsPerHistoricalRoot/8),
dirtyFields: make(map[types.FieldIndex]bool),
}
slot := primitives.Slot(10)
bitIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
byteIndex := bitIndex / 8
bitPosition := bitIndex % 8
require.NoError(t, state.SetExecutionPayloadAvailability(slot, true))
require.Equal(t, true, state.dirtyFields[types.ExecutionPayloadAvailability])
require.Equal(t, byte(1<<bitPosition), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
require.NoError(t, state.SetExecutionPayloadAvailability(slot, false))
require.Equal(t, byte(0), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
}
func TestSetExecutionPayloadAvailability_OutOfRange(t *testing.T) {
state := &BeaconState{
version: version.Gloas,
executionPayloadAvailability: []byte{},
dirtyFields: make(map[types.FieldIndex]bool),
}
err := state.SetExecutionPayloadAvailability(0, true)
require.ErrorContains(t, "out of range", err)
require.Equal(t, false, state.dirtyFields[types.ExecutionPayloadAvailability])
}
func TestIncreaseBuilderBalance(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.IncreaseBuilderBalance(0, 1)
require.ErrorContains(t, "IncreaseBuilderBalance", err)
})
t.Run("out of bounds returns error", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{},
}
err := st.IncreaseBuilderBalance(0, 1)
require.ErrorContains(t, "out of bounds", err)
require.Equal(t, false, st.dirtyFields[types.Builders])
})
t.Run("nil builder returns error", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{nil},
}
err := st.IncreaseBuilderBalance(0, 1)
require.ErrorContains(t, "is nil", err)
require.Equal(t, false, st.dirtyFields[types.Builders])
})
t.Run("increments and marks dirty", func(t *testing.T) {
orig := &ethpb.Builder{Balance: 10}
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{orig},
}
require.NoError(t, st.IncreaseBuilderBalance(0, 5))
require.Equal(t, primitives.Gwei(15), st.builders[0].Balance)
require.Equal(t, true, st.dirtyFields[types.Builders])
// Copy-on-write semantics: builder pointer replaced.
require.NotEqual(t, orig, st.builders[0])
})
}
func TestIncreaseBuilderBalance_CopyOnWrite(t *testing.T) {
orig := &ethpb.Builder{Balance: 10}
statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{orig},
})
require.NoError(t, err)
st, ok := statePb.(*BeaconState)
require.Equal(t, true, ok)
copied := st.Copy().(*BeaconState)
require.Equal(t, uint(2), st.sharedFieldReferences[types.Builders].Refs())
require.NoError(t, copied.IncreaseBuilderBalance(0, 5))
require.Equal(t, primitives.Gwei(10), st.builders[0].Balance)
require.Equal(t, primitives.Gwei(15), copied.builders[0].Balance)
require.Equal(t, uint(1), st.sharedFieldReferences[types.Builders].Refs())
require.Equal(t, uint(1), copied.sharedFieldReferences[types.Builders].Refs())
}
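// Reading the assertions above (illustrative note, not part of this change): Copy() bumps
// the shared reference count on the builders slice to 2; the first write on the copy
// clones the slice and gives each state its own counter of 1, which is why the original
// state still reports the untouched 10 Gwei balance.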
func TestAddBuilderFromDeposit(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
var pubkey [48]byte
var wc [32]byte
st := &BeaconState{version: version.Fulu}
err := st.AddBuilderFromDeposit(pubkey, wc, 1)
require.ErrorContains(t, "AddBuilderFromDeposit", err)
})
t.Run("reuses empty withdrawable slot", func(t *testing.T) {
var pubkey [48]byte
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
var wc [32]byte
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
wc[0] = 0x42 // version byte
st := &BeaconState{
version: version.Gloas,
slot: 0, // epoch 0
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{
{
WithdrawableEpoch: 0,
Balance: 0,
},
},
}
require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 123))
require.Equal(t, 1, len(st.builders))
got := st.builders[0]
require.NotNil(t, got)
require.DeepEqual(t, pubkey[:], got.Pubkey)
require.DeepEqual(t, []byte{0x42}, got.Version)
require.DeepEqual(t, wc[12:], got.ExecutionAddress)
require.Equal(t, primitives.Gwei(123), got.Balance)
require.Equal(t, primitives.Epoch(0), got.DepositEpoch)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, got.WithdrawableEpoch)
require.Equal(t, true, st.dirtyFields[types.Builders])
})
t.Run("appends new builder when no reusable slot", func(t *testing.T) {
var pubkey [48]byte
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
var wc [32]byte
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
st := &BeaconState{
version: version.Gloas,
slot: 0,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{
{
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
Balance: 1,
},
},
}
require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 5))
require.Equal(t, 2, len(st.builders))
require.NotNil(t, st.builders[1])
require.Equal(t, primitives.Gwei(5), st.builders[1].Balance)
})
}
func TestAddBuilderFromDeposit_CopyOnWrite(t *testing.T) {
var pubkey [48]byte
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
var wc [32]byte
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
wc[0] = 0x42 // version byte
statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
Slot: 0,
Builders: []*ethpb.Builder{
{
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
Balance: 1,
},
},
})
require.NoError(t, err)
st, ok := statePb.(*BeaconState)
require.Equal(t, true, ok)
copied := st.Copy().(*BeaconState)
require.Equal(t, uint(2), st.sharedFieldReferences[types.Builders].Refs())
require.NoError(t, copied.AddBuilderFromDeposit(pubkey, wc, 5))
require.Equal(t, 1, len(st.builders))
require.Equal(t, 2, len(copied.builders))
require.Equal(t, uint(1), st.sharedFieldReferences[types.Builders].Refs())
require.Equal(t, uint(1), copied.sharedFieldReferences[types.Builders].Refs())
}

View File

@@ -47,7 +47,6 @@ go_library(
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_handlers.go",
"subscriber_payload_attestation.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
"subscription_topic_handler.go",
@@ -58,9 +57,7 @@ go_library(
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_execution_payload_envelope.go",
"validate_light_client.go",
"validate_payload_attestation.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
"validate_sync_contribution_proof.go",
@@ -117,7 +114,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attestation:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",
@@ -215,9 +211,7 @@ go_test(
"validate_blob_test.go",
"validate_bls_to_execution_change_test.go",
"validate_data_column_test.go",
"validate_execution_payload_envelope_test.go",
"validate_light_client_test.go",
"validate_payload_attestation_test.go",
"validate_proposer_slashing_test.go",
"validate_sync_committee_message_test.go",
"validate_sync_contribution_proof_test.go",
@@ -270,7 +264,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attestation:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",

View File

@@ -207,13 +207,6 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
}
}
func WithPayloadAttestationCache(c *cache.PayloadAttestationCache) Option {
return func(s *Service) error {
s.payloadAttestationCache = c
return nil
}
}
// WithSlasherEnabled configures the sync package to support slashing detection.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {

View File

@@ -38,7 +38,6 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v7/crypto/rand"
@@ -62,7 +61,6 @@ var _ runtime.Service = (*Service)(nil)
const (
rangeLimit uint64 = 1024
seenBlockSize = 1000
seenPayloadEnvelopeSize = 1000
seenDataColumnSize = seenBlockSize * 128 // Each block can have max 128 data columns.
seenUnaggregatedAttSize = 20000
seenAggregatedAttSize = 16384
@@ -119,12 +117,10 @@ type blockchainService interface {
blockchain.BlockReceiver
blockchain.BlobReceiver
blockchain.DataColumnReceiver
blockchain.ExecutionPayloadEnvelopeReceiver
blockchain.HeadFetcher
blockchain.FinalizationFetcher
blockchain.ForkFetcher
blockchain.AttestationReceiver
blockchain.PayloadAttestationReceiver
blockchain.TimeFetcher
blockchain.GenesisFetcher
blockchain.CanonicalFetcher
@@ -136,78 +132,73 @@ type blockchainService interface {
// Service is responsible for handling all run time p2p related operations as the
// main entry point for network messages.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]any
subHandler *subTopicHandler
pendingAttsLock sync.RWMutex
pendingQueueLock sync.RWMutex
chainStarted *abool.AtomicBool
validateBlockLock sync.RWMutex
rateLimiter *limiter
seenBlockLock sync.RWMutex
seenBlockCache *lru.Cache
seenPayloadEnvelopeCache *lru.Cache
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnCache *slotAwareCache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
seenUnAggregatedAttestationLock sync.RWMutex
seenUnAggregatedAttestationCache *lru.Cache
seenExitLock sync.RWMutex
seenExitCache *lru.Cache
seenProposerSlashingLock sync.RWMutex
seenProposerSlashingCache *lru.Cache
seenAttesterSlashingLock sync.RWMutex
seenAttesterSlashingCache map[uint64]bool
seenSyncMessageLock sync.RWMutex
seenSyncMessageCache *lru.Cache
seenSyncContributionLock sync.RWMutex
seenSyncContributionCache *lru.Cache
badBlockCache *lru.Cache
badBlockLock sync.RWMutex
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newColumnsVerifier verification.NewDataColumnsVerifier
newPayloadAttestationVerifier verification.NewPayloadAttestationMsgVerifier
columnSidecarsExecSingleFlight singleflight.Group
reconstructionSingleFlight singleflight.Group
availableBlocker coverage.AvailableBlocker
reconstructionRandGen *rand.Rand
trackedValidatorsCache *cache.TrackedValidatorsCache
ctxMap ContextByteVersions
slasherEnabled bool
lcStore *lightClient.Store
dataColumnLogCh chan dataColumnLogEntry
payloadAttestationCache *cache.PayloadAttestationCache
digestActions perDigestSet
subscriptionSpawner func(func()) // see Service.spawn for details
newExecutionPayloadEnvelopeVerifier verification.NewExecutionPayloadEnvelopeVerifier
cfg *config
ctx context.Context
cancel context.CancelFunc
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]any
subHandler *subTopicHandler
pendingAttsLock sync.RWMutex
pendingQueueLock sync.RWMutex
chainStarted *abool.AtomicBool
validateBlockLock sync.RWMutex
rateLimiter *limiter
seenBlockLock sync.RWMutex
seenBlockCache *lru.Cache
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnCache *slotAwareCache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
seenUnAggregatedAttestationLock sync.RWMutex
seenUnAggregatedAttestationCache *lru.Cache
seenExitLock sync.RWMutex
seenExitCache *lru.Cache
seenProposerSlashingLock sync.RWMutex
seenProposerSlashingCache *lru.Cache
seenAttesterSlashingLock sync.RWMutex
seenAttesterSlashingCache map[uint64]bool
seenSyncMessageLock sync.RWMutex
seenSyncMessageCache *lru.Cache
seenSyncContributionLock sync.RWMutex
seenSyncContributionCache *lru.Cache
badBlockCache *lru.Cache
badBlockLock sync.RWMutex
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newColumnsVerifier verification.NewDataColumnsVerifier
columnSidecarsExecSingleFlight singleflight.Group
reconstructionSingleFlight singleflight.Group
availableBlocker coverage.AvailableBlocker
reconstructionRandGen *rand.Rand
trackedValidatorsCache *cache.TrackedValidatorsCache
ctxMap ContextByteVersions
slasherEnabled bool
lcStore *lightClient.Store
dataColumnLogCh chan dataColumnLogEntry
digestActions perDigestSet
subscriptionSpawner func(func()) // see Service.spawn for details
}
// NewService initializes new regular sync service.
func NewService(ctx context.Context, opts ...Option) *Service {
ctx, cancel := context.WithCancel(ctx)
r := &Service{
ctx: ctx,
cancel: cancel,
chainStarted: abool.New(),
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]any),
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
reconstructionRandGen: rand.NewGenerator(),
payloadAttestationCache: &cache.PayloadAttestationCache{},
ctx: ctx,
cancel: cancel,
chainStarted: abool.New(),
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]any),
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
reconstructionRandGen: rand.NewGenerator(),
}
for _, opt := range opts {
@@ -259,12 +250,6 @@ func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verifi
}
}
func newPayloadAttestationMessageFromInitializer(ini *verification.Initializer) verification.NewPayloadAttestationMsgVerifier {
return func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return ini.NewPayloadAttestationMsgVerifier(pa, reqs)
}
}
// Start the regular sync service.
func (s *Service) Start() {
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
@@ -274,8 +259,6 @@ func (s *Service) Start() {
}
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
s.newPayloadAttestationVerifier = newPayloadAttestationMessageFromInitializer(v)
s.newExecutionPayloadEnvelopeVerifier = newPayloadVerifierFromInitializer(v)
go s.verifierRoutine()
go s.startDiscoveryAndSubscriptions()
@@ -363,7 +346,6 @@ func (s *Service) Status() error {
// and prevent DoS.
func (s *Service) initCaches() {
s.seenBlockCache = lruwrpr.New(seenBlockSize)
s.seenPayloadEnvelopeCache = lruwrpr.New(seenPayloadEnvelopeSize)
s.seenBlobCache = lruwrpr.New(seenBlockSize * params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra)
s.seenDataColumnCache = newSlotAwareCache(seenDataColumnSize)
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
@@ -564,9 +546,3 @@ type Checker interface {
Status() error
Resync() error
}
func newPayloadVerifierFromInitializer(ini *verification.Initializer) verification.NewExecutionPayloadEnvelopeVerifier {
return func(e interfaces.ROSignedExecutionPayloadEnvelope, reqs []verification.Requirement) verification.ExecutionPayloadEnvelopeVerifier {
return ini.NewPayloadEnvelopeVerifier(e, reqs)
}
}

View File

@@ -330,27 +330,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
})
})
}
// New gossip topic in Gloas.
if params.BeaconConfig().GloasForkEpoch <= nse.Epoch {
s.spawn(func() {
s.subscribe(
p2p.PayloadAttestationMessageTopicFormat,
s.validatePayloadAttestation,
s.payloadAttestationSubscriber,
nse,
)
})
s.spawn(func() {
s.subscribe(
p2p.ExecutionPayloadEnvelopeTopicFormat,
s.validateExecutionPayloadEnvelope,
s.executionPayloadEnvelopeSubscriber,
nse,
)
})
}
return true
}

View File

@@ -1,21 +0,0 @@
package sync
import (
"context"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
func (s *Service) payloadAttestationSubscriber(ctx context.Context, msg proto.Message) error {
a, ok := msg.(*eth.PayloadAttestationMessage)
if !ok {
return errWrongMessage
}
if err := s.payloadAttestationCache.Add(a.Data.Slot, a.ValidatorIndex); err != nil {
return err
}
return s.cfg.chain.ReceivePayloadAttestationMessage(ctx, a)
}

View File

@@ -1,158 +0,0 @@
package sync
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
func (s *Service) validateExecutionPayloadEnvelope(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
if s.cfg.initialSync.Syncing() {
return pubsub.ValidationIgnore, nil
}
ctx, span := trace.StartSpan(ctx, "sync.validateExecutionPayloadEnvelope")
defer span.End()
if msg.Topic == nil {
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}
m, err := s.decodePubsubMessage(msg)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
}
signedEnvelope, ok := m.(*ethpb.SignedExecutionPayloadEnvelope)
if !ok {
return pubsub.ValidationReject, errWrongMessage
}
e, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedEnvelope)
if err != nil {
log.WithError(err).Error("failed to create read only signed payload execution envelope")
return pubsub.ValidationIgnore, err
}
v := s.newExecutionPayloadEnvelopeVerifier(e, verification.GossipExecutionPayloadEnvelopeRequirements)
env, err := e.Envelope()
if err != nil {
return pubsub.ValidationIgnore, err
}
// [IGNORE] The envelope's block root envelope.block_root has been seen (via gossip or non-gossip sources)
// (a client MAY queue payload for processing once the block is retrieved).
if err := v.VerifyBlockRootSeen(func(root [32]byte) bool { return s.cfg.chain.HasBlock(ctx, root) }); err != nil {
return pubsub.ValidationIgnore, err
}
root := env.BeaconBlockRoot()
// [IGNORE] The node has not seen another valid SignedExecutionPayloadEnvelope for this block root from this builder.
if s.hasSeenPayloadEnvelope(root, env.BuilderIndex()) {
return pubsub.ValidationIgnore, nil
}
finalized := s.cfg.chain.FinalizedCheckpt()
if finalized == nil {
return pubsub.ValidationIgnore, errors.New("nil finalized checkpoint")
}
// [IGNORE] The envelope is from a slot greater than or equal to the latest finalized slot --
// i.e. validate that envelope.slot >= compute_start_slot_at_epoch(store.finalized_checkpoint.epoch).
if err := v.VerifySlotAboveFinalized(finalized.Epoch); err != nil {
return pubsub.ValidationIgnore, err
}
// [REJECT] block passes validation.
if err := v.VerifyBlockRootValid(s.hasBadBlock); err != nil {
return pubsub.ValidationReject, err
}
// Let block be the block with envelope.beacon_block_root.
block, err := s.cfg.beaconDB.Block(ctx, root)
if err != nil {
return pubsub.ValidationIgnore, err
}
// [REJECT] block.slot equals envelope.slot.
if err := v.VerifySlotMatchesBlock(block.Block().Slot()); err != nil {
return pubsub.ValidationReject, err
}
// Let bid alias block.body.signed_execution_payload_bid.message
// (notice that this can be obtained from the state.latest_execution_payload_bid).
signedBid, err := block.Block().Body().SignedExecutionPayloadBid()
if err != nil {
return pubsub.ValidationIgnore, err
}
wrappedBid, err := blocks.WrappedROSignedExecutionPayloadBid(signedBid)
if err != nil {
return pubsub.ValidationIgnore, err
}
bid, err := wrappedBid.Bid()
if err != nil {
return pubsub.ValidationIgnore, err
}
// [REJECT] envelope.builder_index == bid.builder_index.
if err := v.VerifyBuilderValid(bid); err != nil {
return pubsub.ValidationReject, err
}
// [REJECT] payload.block_hash == bid.block_hash.
if err := v.VerifyPayloadHash(bid); err != nil {
return pubsub.ValidationReject, err
}
// For self-built payloads, the state is retrieved the same way it is for beacon block verification (optimized path).
// For builder-submitted payloads, the state is retrieved from the read-only head state.
st, err := s.blockVerifyingState(ctx, block)
if err != nil {
return pubsub.ValidationIgnore, err
}
// [REJECT] signed_execution_payload_envelope.signature is valid with respect to the builder's public key.
if err := v.VerifySignature(st); err != nil {
return pubsub.ValidationReject, err
}
s.setSeenPayloadEnvelope(root, env.BuilderIndex())
return pubsub.ValidationAccept, nil
}
func (s *Service) executionPayloadEnvelopeSubscriber(ctx context.Context, msg proto.Message) error {
e, ok := msg.(*ethpb.SignedExecutionPayloadEnvelope)
if !ok {
return errWrongMessage
}
env, err := blocks.WrappedROSignedExecutionPayloadEnvelope(e)
if err != nil {
return errors.Wrap(err, "could not wrap signed execution payload envelope")
}
return s.cfg.chain.ReceiveExecutionPayloadEnvelope(ctx, env)
}
func (s *Service) hasSeenPayloadEnvelope(root [32]byte, builderIdx primitives.BuilderIndex) bool {
if s.seenPayloadEnvelopeCache == nil {
return false
}
b := append(bytesutil.Bytes32(uint64(builderIdx)), root[:]...)
_, seen := s.seenPayloadEnvelopeCache.Get(string(b))
return seen
}
func (s *Service) setSeenPayloadEnvelope(root [32]byte, builderIdx primitives.BuilderIndex) {
if s.seenPayloadEnvelopeCache == nil {
return
}
b := append(bytesutil.Bytes32(uint64(builderIdx)), root[:]...)
s.seenPayloadEnvelopeCache.Add(string(b), true)
}

View File

@@ -1,291 +0,0 @@
package sync
import (
"bytes"
"context"
"reflect"
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
dbtest "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/pkg/errors"
)
func TestValidateExecutionPayloadEnvelope_InvalidTopic(t *testing.T) {
ctx := context.Background()
p := p2ptest.NewTestP2P(t)
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
result, err := s.validateExecutionPayloadEnvelope(ctx, "", &pubsub.Message{
Message: &pb.Message{},
})
require.ErrorIs(t, p2p.ErrInvalidTopic, err)
require.Equal(t, result, pubsub.ValidationReject)
}
func TestValidateExecutionPayloadEnvelope_AlreadySeen(t *testing.T) {
ctx := context.Background()
s, msg, builderIdx, root := setupExecutionPayloadEnvelopeService(t, 1, 1)
s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(mockExecutionPayloadEnvelopeVerifier{})
s.setSeenPayloadEnvelope(root, builderIdx)
result, err := s.validateExecutionPayloadEnvelope(ctx, "", msg)
require.NoError(t, err)
require.Equal(t, result, pubsub.ValidationIgnore)
}
func TestValidateExecutionPayloadEnvelope_ErrorPathsWithMock(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
verifier mockExecutionPayloadEnvelopeVerifier
result pubsub.ValidationResult
}{
{
name: "block root not seen",
verifier: mockExecutionPayloadEnvelopeVerifier{errBlockRootSeen: errors.New("not seen")},
result: pubsub.ValidationIgnore,
},
{
name: "slot below finalized",
verifier: mockExecutionPayloadEnvelopeVerifier{errSlotAboveFinalized: errors.New("below finalized")},
result: pubsub.ValidationIgnore,
},
{
name: "block root invalid",
verifier: mockExecutionPayloadEnvelopeVerifier{errBlockRootValid: errors.New("invalid block")},
result: pubsub.ValidationReject,
},
{
name: "slot mismatch",
verifier: mockExecutionPayloadEnvelopeVerifier{errSlotMatchesBlock: errors.New("slot mismatch")},
result: pubsub.ValidationReject,
},
{
name: "builder mismatch",
verifier: mockExecutionPayloadEnvelopeVerifier{errBuilderValid: errors.New("builder mismatch")},
result: pubsub.ValidationReject,
},
{
name: "payload hash mismatch",
verifier: mockExecutionPayloadEnvelopeVerifier{errPayloadHash: errors.New("payload hash mismatch")},
result: pubsub.ValidationReject,
},
{
name: "signature invalid",
verifier: mockExecutionPayloadEnvelopeVerifier{errSignature: errors.New("signature invalid")},
result: pubsub.ValidationReject,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
s, msg, _, _ := setupExecutionPayloadEnvelopeService(t, 1, 1)
s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(tc.verifier)
result, err := s.validateExecutionPayloadEnvelope(ctx, "", msg)
require.NotNil(t, err)
require.Equal(t, result, tc.result)
})
}
}
func TestValidateExecutionPayloadEnvelope_HappyPath(t *testing.T) {
ctx := context.Background()
s, msg, builderIdx, root := setupExecutionPayloadEnvelopeService(t, 1, 1)
s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(mockExecutionPayloadEnvelopeVerifier{})
require.Equal(t, false, s.hasSeenPayloadEnvelope(root, builderIdx))
result, err := s.validateExecutionPayloadEnvelope(ctx, "", msg)
require.NoError(t, err)
require.Equal(t, result, pubsub.ValidationAccept)
require.Equal(t, true, s.hasSeenPayloadEnvelope(root, builderIdx))
}
func TestExecutionPayloadEnvelopeSubscriber_WrongMessage(t *testing.T) {
s := &Service{cfg: &config{}}
err := s.executionPayloadEnvelopeSubscriber(context.Background(), &ethpb.BeaconBlock{})
require.ErrorIs(t, errWrongMessage, err)
}
func TestExecutionPayloadEnvelopeSubscriber_HappyPath(t *testing.T) {
s := &Service{cfg: &config{chain: &mock.ChainService{}}}
root := [32]byte{0x01}
blockHash := [32]byte{0x02}
env := testSignedExecutionPayloadEnvelope(t, 1, 2, root, blockHash)
err := s.executionPayloadEnvelopeSubscriber(context.Background(), env)
require.NoError(t, err)
}
type mockExecutionPayloadEnvelopeVerifier struct {
errBlockRootSeen error
errBlockRootValid error
errSlotAboveFinalized error
errSlotMatchesBlock error
errBuilderValid error
errPayloadHash error
errSignature error
}
var _ verification.ExecutionPayloadEnvelopeVerifier = &mockExecutionPayloadEnvelopeVerifier{}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifyBlockRootSeen(_ func([32]byte) bool) error {
return m.errBlockRootSeen
}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifyBlockRootValid(_ func([32]byte) bool) error {
return m.errBlockRootValid
}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifySlotAboveFinalized(_ primitives.Epoch) error {
return m.errSlotAboveFinalized
}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifySlotMatchesBlock(_ primitives.Slot) error {
return m.errSlotMatchesBlock
}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifyBuilderValid(_ interfaces.ROExecutionPayloadBid) error {
return m.errBuilderValid
}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifyPayloadHash(_ interfaces.ROExecutionPayloadBid) error {
return m.errPayloadHash
}
func (m *mockExecutionPayloadEnvelopeVerifier) VerifySignature(_ state.ReadOnlyBeaconState) error {
return m.errSignature
}
func (*mockExecutionPayloadEnvelopeVerifier) SatisfyRequirement(_ verification.Requirement) {}
func testNewExecutionPayloadEnvelopeVerifier(m mockExecutionPayloadEnvelopeVerifier) verification.NewExecutionPayloadEnvelopeVerifier {
return func(_ interfaces.ROSignedExecutionPayloadEnvelope, _ []verification.Requirement) verification.ExecutionPayloadEnvelopeVerifier {
clone := m
return &clone
}
}
func setupExecutionPayloadEnvelopeService(t *testing.T, envelopeSlot, blockSlot primitives.Slot) (*Service, *pubsub.Message, primitives.BuilderIndex, [32]byte) {
t.Helper()
ctx := context.Background()
db := dbtest.SetupDB(t)
p := p2ptest.NewTestP2P(t)
chainService := &mock.ChainService{
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
FinalizedCheckPoint: &ethpb.Checkpoint{},
DB: db,
}
stateGen := stategen.New(db, doublylinkedtree.New())
s := &Service{
seenPayloadEnvelopeCache: lruwrpr.New(10),
cfg: &config{
p2p: p,
initialSync: &mockSync.Sync{},
chain: chainService,
beaconDB: db,
stateGen: stateGen,
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
},
}
bid := util.GenerateTestSignedExecutionPayloadBid(blockSlot)
sb := util.NewBeaconBlockGloas()
sb.Block.Slot = blockSlot
sb.Block.Body.SignedExecutionPayloadBid = bid
signedBlock, err := blocks.NewSignedBeaconBlock(sb)
require.NoError(t, err)
root, err := signedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, signedBlock))
state, err := util.NewBeaconStateFulu()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, state, root))
blockHash := bytesutil.ToBytes32(bid.Message.BlockHash)
env := testSignedExecutionPayloadEnvelope(t, envelopeSlot, primitives.BuilderIndex(bid.Message.BuilderIndex), root, blockHash)
msg := envelopeToPubsub(t, s, p, env)
return s, msg, primitives.BuilderIndex(bid.Message.BuilderIndex), root
}
func envelopeToPubsub(t *testing.T, s *Service, p p2p.P2P, env *ethpb.SignedExecutionPayloadEnvelope) *pubsub.Message {
t.Helper()
buf := new(bytes.Buffer)
_, err := p.Encoding().EncodeGossip(buf, env)
require.NoError(t, err)
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.SignedExecutionPayloadEnvelope]()]
digest, err := s.currentForkDigest()
require.NoError(t, err)
topic = s.addDigestToTopic(topic, digest)
return &pubsub.Message{
Message: &pb.Message{
Data: buf.Bytes(),
Topic: &topic,
},
}
}
func testSignedExecutionPayloadEnvelope(t *testing.T, slot primitives.Slot, builderIdx primitives.BuilderIndex, root, blockHash [32]byte) *ethpb.SignedExecutionPayloadEnvelope {
t.Helper()
payload := &enginev1.ExecutionPayloadDeneb{
ParentHash: bytes.Repeat([]byte{0x01}, 32),
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
StateRoot: bytes.Repeat([]byte{0x03}, 32),
ReceiptsRoot: bytes.Repeat([]byte{0x04}, 32),
LogsBloom: bytes.Repeat([]byte{0x05}, 256),
PrevRandao: bytes.Repeat([]byte{0x06}, 32),
BlockNumber: 1,
GasLimit: 2,
GasUsed: 3,
Timestamp: 4,
BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
BlockHash: blockHash[:],
Transactions: [][]byte{},
Withdrawals: []*enginev1.Withdrawal{},
BlobGasUsed: 0,
ExcessBlobGas: 0,
}
return &ethpb.SignedExecutionPayloadEnvelope{
Message: &ethpb.ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: &enginev1.ExecutionRequests{
Deposits: []*enginev1.DepositRequest{},
},
BuilderIndex: builderIdx,
BeaconBlockRoot: root[:],
Slot: slot,
StateRoot: bytes.Repeat([]byte{0xBB}, 32),
},
Signature: bytes.Repeat([]byte{0xAA}, 96),
}
}
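// The helpers above compose into an end-to-end gossip validation test roughly as sketched below. This is a sketch
// only: the handler name validateExecutionPayloadEnvelope and the service field newExecutionPayloadEnvelopeVerifier
// are assumptions mirroring the payload attestation wiring and are not shown in this diff.
//
//	slot := primitives.Slot(1)
//	s, msg, builderIdx, root := setupExecutionPayloadEnvelopeService(t, slot, slot)
//	_, _ = builderIdx, root // fuller tests assert against these
//	s.newExecutionPayloadEnvelopeVerifier = testNewExecutionPayloadEnvelopeVerifier(mockExecutionPayloadEnvelopeVerifier{})
//	res, err := s.validateExecutionPayloadEnvelope(context.Background(), "", msg)
//	require.NoError(t, err)
//	require.Equal(t, pubsub.ValidationAccept, res)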

View File

@@ -1,131 +0,0 @@
package sync
import (
"bytes"
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
)
var (
errAlreadySeenPayloadAttestation = errors.New("payload attestation already seen for validator index")
)
// validatePayloadAttestation is the gossip validator for payload attestation messages. It applies the spec's
// [IGNORE]/[REJECT] conditions in order and, on success, attaches the decoded message to msg.ValidatorData.
func (s *Service) validatePayloadAttestation(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
if s.cfg.initialSync.Syncing() {
return pubsub.ValidationIgnore, nil
}
ctx, span := trace.StartSpan(ctx, "sync.validatePayloadAttestation")
defer span.End()
if msg.Topic == nil {
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}
m, err := s.decodePubsubMessage(msg)
if err != nil {
return pubsub.ValidationReject, err
}
att, ok := m.(*eth.PayloadAttestationMessage)
if !ok {
return pubsub.ValidationReject, errWrongMessage
}
pa, err := payloadattestation.NewReadOnly(att)
if err != nil {
return pubsub.ValidationIgnore, err
}
v := s.newPayloadAttestationVerifier(pa, verification.GossipPayloadAttestationMessageRequirements)
// [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance),
// i.e. data.slot == current_slot.
if err := v.VerifyCurrentSlot(); err != nil {
return pubsub.ValidationIgnore, err
}
// [IGNORE] The payload_attestation_message is the first valid message received from the validator with
// index payload_attestation_message.validator_index.
if s.payloadAttestationCache.Seen(pa.Slot(), pa.ValidatorIndex()) {
return pubsub.ValidationIgnore, errAlreadySeenPayloadAttestation
}
// [IGNORE] The message's block data.beacon_block_root has been seen (via gossip or non-gossip sources)
// (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after).
if err := v.VerifyBlockRootSeen(s.cfg.chain.InForkchoice); err != nil {
// TODO: queue attestation
return pubsub.ValidationIgnore, err
}
// [REJECT] The message's block data.beacon_block_root passes validation.
if err := v.VerifyBlockRootValid(s.hasBadBlock); err != nil {
return pubsub.ValidationReject, err
}
st, err := s.getPtcState(ctx, pa)
if err != nil {
return pubsub.ValidationIgnore, err
}
// [REJECT] The message's validator index is within the payload committee in get_ptc(state, data.slot).
// The state is the head state corresponding to processing the block up to the current slot.
if err := v.VerifyValidatorInPTC(ctx, st); err != nil {
return pubsub.ValidationReject, err
}
// [REJECT] payload_attestation_message.signature is valid with respect to the validator's public key.
if err := v.VerifySignature(st); err != nil {
return pubsub.ValidationReject, err
}
msg.ValidatorData = att
return pubsub.ValidationAccept, nil
}
// getPtcState returns a state suitable for computing the payload timeliness committee (PTC) at the attested slot.
// It reuses the cheaper read-only head state whenever the attested block shares the head's shuffling for that epoch,
// and otherwise advances a copy of the head state to the attested slot via the next-slot cache.
func (s *Service) getPtcState(ctx context.Context, pa payloadattestation.ROMessage) (state.ReadOnlyBeaconState, error) {
blockRoot := pa.BeaconBlockRoot()
blockSlot := pa.Slot()
blockEpoch := slots.ToEpoch(blockSlot)
headSlot := s.cfg.chain.HeadSlot()
headEpoch := slots.ToEpoch(headSlot)
headRoot, err := s.cfg.chain.HeadRoot(ctx)
if err != nil {
return nil, err
}
if blockEpoch == headEpoch {
if bytes.Equal(blockRoot[:], headRoot) {
return s.cfg.chain.HeadStateReadOnly(ctx)
}
headDependent, err := s.cfg.chain.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), blockEpoch)
if err != nil {
return nil, err
}
blockDependent, err := s.cfg.chain.DependentRootForEpoch(blockRoot, blockEpoch)
if err != nil {
return nil, err
}
if bytes.Equal(headDependent[:], blockDependent[:]) {
return s.cfg.chain.HeadStateReadOnly(ctx)
}
}
headState, err := s.cfg.chain.HeadState(ctx)
if err != nil {
return nil, err
}
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, blockSlot)
}
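// canReuseHeadState is an illustrative, standalone restatement of the decision made in getPtcState above; it is a
// sketch added for clarity and not part of the original package. The head state can only stand in for the PTC
// computation when the attested block falls in the head's epoch and shares the head's shuffling, i.e. the roots are
// identical or both resolve to the same dependent root for that epoch.
func canReuseHeadState(sameEpoch bool, blockRoot, headRoot, blockDependent, headDependent [32]byte) bool {
if !sameEpoch {
return false
}
if blockRoot == headRoot {
return true
}
return blockDependent == headDependent
}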

View File

@@ -1,165 +0,0 @@
package sync
import (
"bytes"
"context"
"reflect"
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/pkg/errors"
)
func TestValidatePayloadAttestationMessage_IncorrectTopic(t *testing.T) {
ctx := context.Background()
p := p2ptest.NewTestP2P(t)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
s := &Service{
payloadAttestationCache: &cache.PayloadAttestationCache{},
cfg: &config{chain: chainService, p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
msg := util.HydratePayloadAttestation(&ethpb.PayloadAttestation{}) // Encoding a PayloadAttestation rather than a PayloadAttestationMessage should fail topic extraction.
buf := new(bytes.Buffer)
_, err := p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestation]()]
digest, err := s.currentForkDigest()
require.NoError(t, err)
topic = s.addDigestToTopic(topic, digest)
result, err := s.validatePayloadAttestation(ctx, "", &pubsub.Message{
Message: &pb.Message{
Data: buf.Bytes(),
Topic: &topic,
}})
require.ErrorContains(t, "extraction failed for topic", err)
require.Equal(t, result, pubsub.ValidationReject)
}
func TestValidatePayloadAttestationMessage_ErrorPathsWithMock(t *testing.T) {
tests := []struct {
error error
verifier verification.NewPayloadAttestationMsgVerifier
result pubsub.ValidationResult
}{
{
error: errors.New("incorrect slot"),
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return &verification.MockPayloadAttestation{ErrIncorrectPayloadAttSlot: errors.New("incorrect slot")}
},
result: pubsub.ValidationIgnore,
},
{
error: errors.New("block root seen"),
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return &verification.MockPayloadAttestation{ErrPayloadAttBlockRootNotSeen: errors.New("block root seen")}
},
result: pubsub.ValidationIgnore,
},
{
error: errors.New("block root invalid"),
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return &verification.MockPayloadAttestation{ErrPayloadAttBlockRootInvalid: errors.New("block root invalid")}
},
result: pubsub.ValidationReject,
},
{
error: errors.New("validator not in PTC"),
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return &verification.MockPayloadAttestation{ErrIncorrectPayloadAttValidator: errors.New("validator not in PTC")}
},
result: pubsub.ValidationReject,
},
{
error: errors.New("incorrect signature"),
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return &verification.MockPayloadAttestation{ErrInvalidMessageSignature: errors.New("incorrect signature")}
},
result: pubsub.ValidationReject,
},
}
for _, tt := range tests {
t.Run(tt.error.Error(), func(t *testing.T) {
ctx := context.Background()
p := p2ptest.NewTestP2P(t)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
s := &Service{
payloadAttestationCache: &cache.PayloadAttestationCache{},
cfg: &config{chain: chainService, p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
s.newPayloadAttestationVerifier = tt.verifier
msg := newPayloadAttestationMessage()
buf := new(bytes.Buffer)
_, err := p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()]
digest, err := s.currentForkDigest()
require.NoError(t, err)
topic = s.addDigestToTopic(topic, digest)
result, err := s.validatePayloadAttestation(ctx, "", &pubsub.Message{
Message: &pb.Message{
Data: buf.Bytes(),
Topic: &topic,
}})
require.ErrorContains(t, tt.error.Error(), err)
require.Equal(t, result, tt.result)
})
}
}
func TestValidatePayloadAttestationMessage_Accept(t *testing.T) {
ctx := context.Background()
p := p2ptest.NewTestP2P(t)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
s := &Service{
payloadAttestationCache: &cache.PayloadAttestationCache{},
cfg: &config{chain: chainService, p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
s.newPayloadAttestationVerifier = func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
return &verification.MockPayloadAttestation{}
}
msg := newPayloadAttestationMessage()
buf := new(bytes.Buffer)
_, err := p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()]
digest, err := s.currentForkDigest()
require.NoError(t, err)
topic = s.addDigestToTopic(topic, digest)
result, err := s.validatePayloadAttestation(ctx, "", &pubsub.Message{
Message: &pb.Message{
Data: buf.Bytes(),
Topic: &topic,
}})
require.NoError(t, err)
require.Equal(t, result, pubsub.ValidationAccept)
}
// newPayloadAttestationMessage returns a hydrated payload attestation message for slot 1 with a zeroed BLS signature.
func newPayloadAttestationMessage() *ethpb.PayloadAttestationMessage {
return &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: util.HydratePayloadAttestationData(&ethpb.PayloadAttestationData{Slot: 1}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
}
}

Some files were not shown because too many files have changed in this diff.