Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-11 13:35:06 -05:00)

Compare commits: gloas/fork...develop (1 commit)

Commit 09d0338aa9
@@ -46,7 +46,6 @@ type ForkchoiceFetcher interface {
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
|
||||
InsertPayload(context.Context, interfaces.ROExecutionPayloadEnvelope) error
|
||||
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
|
||||
NewSlot(context.Context, primitives.Slot) error
|
||||
ProposerBoost() [32]byte
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
@@ -56,13 +55,6 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block co
|
||||
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
|
||||
}
|
||||
|
||||
// InsertPayload is a wrapper for payload insertion which is self locked
|
||||
func (s *Service) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
return s.cfg.ForkChoiceStore.InsertPayload(ctx, pe)
|
||||
}
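The wrappers above share one self-locking pattern: take the store's write lock (or the read lock for dump-style reads), defer the unlock, and delegate to the underlying fork-choice store. A minimal sketch of that pattern, using a hypothetical store type rather than Prysm's ForkChoiceStore:

```go
package example

import "sync"

// store is a hypothetical stand-in for the underlying fork-choice store.
type store struct {
	mu    sync.RWMutex
	nodes map[[32]byte]struct{}
}

// Insert holds the write lock for the duration of the mutation.
func (s *store) Insert(root [32]byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.nodes[root] = struct{}{}
}

// Has holds the read lock so concurrent readers do not block each other.
func (s *store) Has(root [32]byte) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, ok := s.nodes[root]
	return ok
}
```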
|
||||
|
||||
// ForkChoiceDump returns the corresponding value from forkchoice
|
||||
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
|
||||
@@ -101,16 +101,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
if len(lastValidHash) == 0 {
|
||||
lastValidHash = defaultLatestValidHash
|
||||
}
|
||||
// this call has guaranteed to have the `headRoot` with its payload in forkchoice.
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not set head root to invalid")
|
||||
return nil, nil
|
||||
}
|
||||
// TODO: Gloas, we should not include the head root in this call
|
||||
if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
|
||||
invalidRoots = append([][32]byte{headRoot}, invalidRoots...)
|
||||
}
|
||||
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
|
||||
log.WithError(err).Error("Could not remove invalid block and state")
|
||||
return nil, nil
|
||||
@@ -295,10 +290,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
|
||||
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
|
||||
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
|
||||
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
|
||||
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
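This hunk narrows SetOptimisticToInvalid to three roots (block root, parent root, last valid hash); the payload parent-hash argument is dropped, matching the single-node-per-block store used elsewhere in this diff. A hedged sketch of the calling pattern, with the fork-choice store reduced to a hypothetical one-method interface and database cleanup left to a callback:

```go
package example

import "context"

// invalidator is a hypothetical slice of the fork-choice API; after this change
// SetOptimisticToInvalid takes only root, parent root, and last valid hash.
type invalidator interface {
	SetOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error)
}

// pruneOnInvalidPayload sketches the flow above: mark the invalid subtree in
// fork choice, then hand every invalidated root to a cleanup callback
// (e.g. removing the block and state from the database).
func pruneOnInvalidPayload(ctx context.Context, fc invalidator, root, parentRoot, lvh [32]byte, remove func([32]byte) error) error {
	invalidRoots, err := fc.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
	if err != nil {
		return err
	}
	for _, r := range invalidRoots {
		if err := remove(r); err != nil {
			return err
		}
	}
	return nil
}
```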
@@ -465,9 +465,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brd, headRoot)
|
||||
|
||||
// Ensure F and G's full nodes were removed but their empty (consensus) nodes remain, as does E
|
||||
require.Equal(t, true, fcs.HasNode(brf))
|
||||
require.Equal(t, true, fcs.HasNode(brg))
|
||||
// Ensure F and G were removed but their parent E wasn't
|
||||
require.Equal(t, false, fcs.HasNode(brf))
|
||||
require.Equal(t, false, fcs.HasNode(brg))
|
||||
require.Equal(t, true, fcs.HasNode(bre))
|
||||
}
|
||||
|
||||
@@ -703,13 +703,14 @@ func Test_reportInvalidBlock(t *testing.T) {
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, root))
|
||||
|
||||
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
|
||||
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'c'}, [32]byte{'a'})
|
||||
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
|
||||
require.Equal(t, IsInvalidBlock(err), true)
|
||||
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
|
||||
invalidRoots := InvalidAncestorRoots(err)
|
||||
require.Equal(t, 2, len(invalidRoots))
|
||||
require.Equal(t, 3, len(invalidRoots))
|
||||
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
|
||||
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
|
||||
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
|
||||
}
|
||||
|
||||
func Test_GetPayloadAttribute(t *testing.T) {
|
||||
@@ -784,7 +785,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_GetPayloadAttributeV3(t *testing.T) {
|
||||
testCases := []struct {
|
||||
var testCases = []struct {
|
||||
name string
|
||||
st bstate.BeaconState
|
||||
}{
|
||||
|
||||
@@ -232,8 +232,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
// this call does not have the root in forkchoice yet.
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
@@ -993,9 +992,9 @@ func (s *Service) waitForSync() error {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
|
||||
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
|
||||
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
|
||||
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2006,7 +2006,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
// Check that we have justified the second epoch
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
|
||||
|
||||
// import block 19 to find out that the whole chain 13--18 was in fact
|
||||
// invalid
|
||||
|
||||
@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
|
||||
if err != nil {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.BlockHash()))
|
||||
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -700,14 +700,6 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, blo
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertPayload mocks the same method in the chain service
|
||||
func (s *ChainService) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.InsertPayload(ctx, pe)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForkChoiceDump mocks the same method in the chain service
|
||||
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
|
||||
@@ -66,6 +66,10 @@ type ReadOnlyDatabase interface {
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
|
||||
|
||||
// Execution payload envelope operations (Gloas+).
|
||||
ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error)
|
||||
HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool
|
||||
|
||||
// P2P Metadata operations.
|
||||
MetadataSeqNum(ctx context.Context) (uint64, error)
|
||||
}
|
||||
@@ -115,6 +119,10 @@ type NoHeadAccessDatabase interface {
|
||||
SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error
|
||||
SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
|
||||
|
||||
// Execution payload envelope operations (Gloas+).
|
||||
SaveExecutionPayloadEnvelope(ctx context.Context, envelope *ethpb.SignedExecutionPayloadEnvelope) error
|
||||
DeleteExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
|
||||
|
||||
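The two interface hunks above add the Gloas execution payload envelope operations to the read and write database interfaces. A usage sketch against a hypothetical narrowed interface (the real method returns *ethpb.SignedBlindedExecutionPayloadEnvelope; `any` stands in here so the sketch stays self-contained):

```go
package example

import (
	"context"
	"fmt"
)

// envelopeReader is a hypothetical narrowing of ReadOnlyDatabase to the new calls.
type envelopeReader interface {
	HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool
	ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) (any, error)
}

// loadEnvelope checks existence first, avoiding the "not found" error path on
// roots that never had an envelope stored.
func loadEnvelope(ctx context.Context, db envelopeReader, root [32]byte) (any, error) {
	if !db.HasExecutionPayloadEnvelope(ctx, root) {
		return nil, fmt.Errorf("no envelope stored for root %#x", root)
	}
	return db.ExecutionPayloadEnvelope(ctx, root)
}
```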
@@ -13,6 +13,7 @@ go_library(
|
||||
"encoding.go",
|
||||
"error.go",
|
||||
"execution_chain.go",
|
||||
"execution_payload_envelope.go",
|
||||
"finalized_block_roots.go",
|
||||
"genesis.go",
|
||||
"key.go",
|
||||
@@ -96,6 +97,7 @@ go_test(
|
||||
"deposit_contract_test.go",
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
"execution_payload_envelope_test.go",
|
||||
"finalized_block_roots_test.go",
|
||||
"genesis_test.go",
|
||||
"init_test.go",
|
||||
|
||||
@@ -517,6 +517,10 @@ func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot p
|
||||
return errors.Wrap(err, "could not delete validators")
|
||||
}
|
||||
|
||||
// TODO: execution payload envelopes (Gloas+) are keyed by execution payload
|
||||
// block hash, not beacon block root, so they cannot be pruned in this loop.
|
||||
// A separate pruning mechanism is needed (e.g. secondary index or cursor scan).
|
||||
|
||||
numSlotsDeleted++
|
||||
}
|
||||
|
||||
@@ -1250,6 +1254,12 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(fuluBlindKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal blinded Fulu block")
|
||||
}
|
||||
case hasGloasKey(enc):
|
||||
// post Gloas we save the full beacon block as EIP-7732 separates beacon block and payload
|
||||
rawBlock = ðpb.SignedBeaconBlockGloas{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(gloasKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal Gloas block")
|
||||
}
|
||||
default:
|
||||
// Marshal block bytes to phase 0 beacon block.
|
||||
rawBlock = ðpb.SignedBeaconBlock{}
|
||||
@@ -1280,6 +1290,11 @@ func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
|
||||
func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
|
||||
v := blk.Version()
|
||||
|
||||
if v >= version.Gloas {
|
||||
// Gloas blocks are never blinded (no execution payload in block body).
|
||||
return gloasKey, nil
|
||||
}
|
||||
|
||||
if v >= version.Fulu {
|
||||
if blk.IsBlinded() {
|
||||
return fuluBlindKey, nil
|
||||
|
||||
@@ -151,6 +151,17 @@ var blockTests = []struct {
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
}},
|
||||
{
|
||||
name: "gloas",
|
||||
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
b := util.NewBeaconBlockGloas()
|
||||
b.Block.Slot = slot
|
||||
if root != nil {
|
||||
b.Block.ParentRoot = root
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
|
||||
@@ -211,7 +222,7 @@ func TestStore_BlocksCRUD(t *testing.T) {
|
||||
retrievedBlock, err = db.Block(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
wanted := retrievedBlock
|
||||
if retrievedBlock.Version() >= version.Bellatrix {
|
||||
if retrievedBlock.Version() >= version.Bellatrix && retrievedBlock.Version() < version.Gloas {
|
||||
wanted, err = retrievedBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
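The repeated test change above (and in the hunks that follow) encodes one rule: blocks from Bellatrix up to, but not including, Gloas are stored blinded, while Gloas blocks are stored in full because the execution payload no longer lives in the block body. A tiny sketch of that predicate, assuming the version package's ordering of fork constants:

```go
package example

import "github.com/OffchainLabs/prysm/v7/runtime/version"

// storedBlinded reports whether a block of the given fork version is persisted
// in blinded form. Bellatrix through Fulu blocks are blinded; Gloas blocks
// carry no execution payload in the body, so they are stored as-is.
func storedBlinded(v int) bool {
	return v >= version.Bellatrix && v < version.Gloas
}
```

The same guard appears in each of the CRUD tests that follow.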
|
||||
@@ -643,7 +654,7 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := blk
|
||||
if blk.Version() >= version.Bellatrix {
|
||||
if blk.Version() >= version.Bellatrix && blk.Version() < version.Gloas {
|
||||
wanted, err = blk.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1014,7 +1025,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted := block1
|
||||
if block1.Version() >= version.Bellatrix {
|
||||
if block1.Version() >= version.Bellatrix && block1.Version() < version.Gloas {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1032,7 +1043,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted2 := block2
|
||||
if block2.Version() >= version.Bellatrix {
|
||||
if block2.Version() >= version.Bellatrix && block2.Version() < version.Gloas {
|
||||
wanted2, err = block2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1050,7 +1061,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted = block3
|
||||
if block3.Version() >= version.Bellatrix {
|
||||
if block3.Version() >= version.Bellatrix && block3.Version() < version.Gloas {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1086,7 +1097,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted := block1
|
||||
if block1.Version() >= version.Bellatrix {
|
||||
if block1.Version() >= version.Bellatrix && block1.Version() < version.Gloas {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1103,7 +1114,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted = genesisBlock
|
||||
if genesisBlock.Version() >= version.Bellatrix {
|
||||
if genesisBlock.Version() >= version.Bellatrix && genesisBlock.Version() < version.Gloas {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1120,7 +1131,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
wanted = genesisBlock
|
||||
if genesisBlock.Version() >= version.Bellatrix {
|
||||
if genesisBlock.Version() >= version.Bellatrix && genesisBlock.Version() < version.Gloas {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1216,7 +1227,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := b1
|
||||
if b1.Version() >= version.Bellatrix {
|
||||
if b1.Version() >= version.Bellatrix && b1.Version() < version.Gloas {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1232,7 +1243,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
|
||||
}
|
||||
wanted = b2
|
||||
if b2.Version() >= version.Bellatrix {
|
||||
if b2.Version() >= version.Bellatrix && b2.Version() < version.Gloas {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1242,7 +1253,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(wantedPb, retrieved0Pb), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
|
||||
wanted = b3
|
||||
if b3.Version() >= version.Bellatrix {
|
||||
if b3.Version() >= version.Bellatrix && b3.Version() < version.Gloas {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
beacon-chain/db/kv/execution_payload_envelope.go (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// SaveExecutionPayloadEnvelope blinds and saves a signed execution payload envelope keyed by
|
||||
// beacon block root. The envelope is stored in blinded form: the full execution payload is replaced
|
||||
// with its block hash. The full payload can later be retrieved from the EL via
|
||||
// engine_getPayloadBodiesByHash.
|
||||
func (s *Store) SaveExecutionPayloadEnvelope(ctx context.Context, env *ethpb.SignedExecutionPayloadEnvelope) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
|
||||
if env == nil || env.Message == nil || env.Message.Payload == nil {
|
||||
return errors.New("cannot save nil execution payload envelope")
|
||||
}
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
|
||||
blinded := blindEnvelope(env)
|
||||
|
||||
enc, err := encodeBlindedEnvelope(blinded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
|
||||
return bkt.Put(blockRoot[:], enc)
|
||||
})
|
||||
}
|
||||
|
||||
// ExecutionPayloadEnvelope retrieves the blinded signed execution payload envelope by beacon block root.
|
||||
func (s *Store) ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.ExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
|
||||
var enc []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
|
||||
enc = bkt.Get(blockRoot[:])
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if enc == nil {
|
||||
return nil, errors.Wrap(ErrNotFound, "execution payload envelope not found")
|
||||
}
|
||||
return decodeBlindedEnvelope(enc)
|
||||
}
|
||||
|
||||
// HasExecutionPayloadEnvelope checks whether an execution payload envelope exists for the given beacon block root.
|
||||
func (s *Store) HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
|
||||
var exists bool
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
|
||||
exists = bkt.Get(blockRoot[:]) != nil
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false
|
||||
}
|
||||
return exists
|
||||
}
|
||||
|
||||
// DeleteExecutionPayloadEnvelope removes a signed execution payload envelope by beacon block root.
|
||||
func (s *Store) DeleteExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(executionPayloadEnvelopesBucket)
|
||||
return bkt.Delete(blockRoot[:])
|
||||
})
|
||||
}
|
||||
|
||||
// blindEnvelope converts a full signed envelope to its blinded form by replacing
|
||||
// the execution payload with its block hash. This avoids computing the expensive
|
||||
// payload hash tree root on the critical path.
|
||||
func blindEnvelope(env *ethpb.SignedExecutionPayloadEnvelope) *ethpb.SignedBlindedExecutionPayloadEnvelope {
|
||||
return ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
BlockHash: env.Message.Payload.BlockHash,
|
||||
ExecutionRequests: env.Message.ExecutionRequests,
|
||||
BuilderIndex: env.Message.BuilderIndex,
|
||||
BeaconBlockRoot: env.Message.BeaconBlockRoot,
|
||||
Slot: env.Message.Slot,
|
||||
StateRoot: env.Message.StateRoot,
|
||||
},
|
||||
Signature: env.Signature,
|
||||
}
|
||||
}
|
||||
|
||||
// encodeBlindedEnvelope SSZ-encodes and snappy-compresses a blinded envelope for storage.
|
||||
func encodeBlindedEnvelope(env *ethpb.SignedBlindedExecutionPayloadEnvelope) ([]byte, error) {
|
||||
sszBytes, err := env.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal blinded envelope")
|
||||
}
|
||||
return snappy.Encode(nil, sszBytes), nil
|
||||
}
|
||||
|
||||
// decodeBlindedEnvelope snappy-decompresses and SSZ-decodes a blinded envelope from storage.
|
||||
func decodeBlindedEnvelope(enc []byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error) {
|
||||
dec, err := snappy.Decode(nil, enc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not snappy decode envelope")
|
||||
}
|
||||
blinded := ðpb.SignedBlindedExecutionPayloadEnvelope{}
|
||||
if err := blinded.UnmarshalSSZ(dec); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal blinded envelope")
|
||||
}
|
||||
return blinded, nil
|
||||
}
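For reference, the save and load paths above reduce to a small bbolt pattern: SSZ-marshal, snappy-compress, Put under the beacon block root, and reverse the steps on read. A self-contained sketch with plain byte slices standing in for the SSZ-encoded envelope (the helper names are illustrative, not Prysm's; the bucket is assumed to have been created at store initialization, as in the Buckets hunk below):

```go
package example

import (
	"errors"

	"github.com/golang/snappy"
	bolt "go.etcd.io/bbolt"
)

var envelopeBucket = []byte("execution-payload-envelopes")

// putCompressed snappy-compresses already-SSZ-encoded bytes and stores them
// under the beacon block root, mirroring SaveExecutionPayloadEnvelope above.
func putCompressed(db *bolt.DB, blockRoot [32]byte, sszBytes []byte) error {
	enc := snappy.Encode(nil, sszBytes)
	return db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(envelopeBucket).Put(blockRoot[:], enc)
	})
}

// getCompressed reads and decompresses the stored value, mirroring
// ExecutionPayloadEnvelope above (minus the SSZ unmarshal step).
func getCompressed(db *bolt.DB, blockRoot [32]byte) ([]byte, error) {
	var enc []byte
	if err := db.View(func(tx *bolt.Tx) error {
		enc = tx.Bucket(envelopeBucket).Get(blockRoot[:])
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, errors.New("envelope not found")
	}
	return snappy.Decode(nil, enc)
}
```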
|
||||
beacon-chain/db/kv/execution_payload_envelope_test.go (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func testEnvelope(t *testing.T) *ethpb.SignedExecutionPayloadEnvelope {
|
||||
t.Helper()
|
||||
return ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: ðpb.ExecutionPayloadEnvelope{
|
||||
Payload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: bytesutil.PadTo([]byte("parent"), 32),
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
|
||||
ReceiptsRoot: bytesutil.PadTo([]byte("receipts"), 32),
|
||||
LogsBloom: bytesutil.PadTo([]byte{}, 256),
|
||||
PrevRandao: bytesutil.PadTo([]byte("randao"), 32),
|
||||
BlockNumber: 100,
|
||||
GasLimit: 30000000,
|
||||
GasUsed: 21000,
|
||||
Timestamp: 1000,
|
||||
ExtraData: []byte("extra"),
|
||||
BaseFeePerGas: bytesutil.PadTo([]byte{1}, 32),
|
||||
BlockHash: bytesutil.PadTo([]byte("blockhash"), 32),
|
||||
Transactions: [][]byte{[]byte("tx1"), []byte("tx2")},
|
||||
Withdrawals: []*enginev1.Withdrawal{{Index: 1, ValidatorIndex: 2, Address: bytesutil.PadTo([]byte("addr"), 20), Amount: 100}},
|
||||
BlobGasUsed: 131072,
|
||||
ExcessBlobGas: 0,
|
||||
},
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
BuilderIndex: primitives.BuilderIndex(42),
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("beaconroot"), 32),
|
||||
Slot: primitives.Slot(99),
|
||||
StateRoot: bytesutil.PadTo([]byte("envelopestateroot"), 32),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig"), 96),
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveAndRetrieveExecutionPayloadEnvelope(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
env := testEnvelope(t)
|
||||
|
||||
// Keyed by beacon block root.
|
||||
blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
|
||||
|
||||
// Initially should not exist.
|
||||
assert.Equal(t, false, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
|
||||
|
||||
// Save (always blinds internally).
|
||||
require.NoError(t, db.SaveExecutionPayloadEnvelope(ctx, env))
|
||||
|
||||
// Should exist now.
|
||||
assert.Equal(t, true, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
|
||||
|
||||
// Load and verify it's blinded.
|
||||
loaded, err := db.ExecutionPayloadEnvelope(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify metadata is preserved.
|
||||
assert.Equal(t, env.Message.Slot, loaded.Message.Slot)
|
||||
assert.Equal(t, env.Message.BuilderIndex, loaded.Message.BuilderIndex)
|
||||
assert.DeepEqual(t, env.Message.BeaconBlockRoot, loaded.Message.BeaconBlockRoot)
|
||||
assert.DeepEqual(t, env.Message.StateRoot, loaded.Message.StateRoot)
|
||||
assert.DeepEqual(t, env.Signature, loaded.Signature)
|
||||
|
||||
// BlockHash should be the payload's block hash (not a hash tree root).
|
||||
assert.DeepEqual(t, env.Message.Payload.BlockHash, loaded.Message.BlockHash)
|
||||
}
|
||||
|
||||
func TestStore_DeleteExecutionPayloadEnvelope(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
env := testEnvelope(t)
|
||||
blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
|
||||
|
||||
require.NoError(t, db.SaveExecutionPayloadEnvelope(ctx, env))
|
||||
assert.Equal(t, true, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
|
||||
|
||||
require.NoError(t, db.DeleteExecutionPayloadEnvelope(ctx, blockRoot))
|
||||
assert.Equal(t, false, db.HasExecutionPayloadEnvelope(ctx, blockRoot))
|
||||
}
|
||||
|
||||
func TestStore_ExecutionPayloadEnvelope_NotFound(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
nonExistent := bytesutil.ToBytes32([]byte("nonexistent"))
|
||||
|
||||
_, err := db.ExecutionPayloadEnvelope(ctx, nonExistent)
|
||||
require.ErrorContains(t, "not found", err)
|
||||
}
|
||||
|
||||
func TestStore_SaveExecutionPayloadEnvelope_NilRejected(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
err := db.SaveExecutionPayloadEnvelope(ctx, nil)
|
||||
require.ErrorContains(t, "nil", err)
|
||||
}
|
||||
|
||||
func TestBlindEnvelope_PreservesBlockHash(t *testing.T) {
|
||||
env := testEnvelope(t)
|
||||
|
||||
blinded := blindEnvelope(env)
|
||||
|
||||
// Should contain the block hash from the payload, not a hash tree root.
|
||||
assert.DeepEqual(t, env.Message.Payload.BlockHash, blinded.Message.BlockHash)
|
||||
|
||||
// Metadata should be preserved.
|
||||
assert.Equal(t, env.Message.BuilderIndex, blinded.Message.BuilderIndex)
|
||||
assert.Equal(t, env.Message.Slot, blinded.Message.Slot)
|
||||
assert.DeepEqual(t, env.Message.BeaconBlockRoot, blinded.Message.BeaconBlockRoot)
|
||||
assert.DeepEqual(t, env.Message.StateRoot, blinded.Message.StateRoot)
|
||||
assert.DeepEqual(t, env.Signature, blinded.Signature)
|
||||
}
|
||||
@@ -87,3 +87,10 @@ func hasFuluBlindKey(enc []byte) bool {
|
||||
}
|
||||
return bytes.Equal(enc[:len(fuluBlindKey)], fuluBlindKey)
|
||||
}
|
||||
|
||||
func hasGloasKey(enc []byte) bool {
|
||||
if len(gloasKey) >= len(enc) {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(enc[:len(gloasKey)], gloasKey)
|
||||
}
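hasGloasKey follows the same convention as the other has*Key helpers: stored block bytes are prefixed with a short ASCII tag (here the gloasKey added in the next hunk) that identifies the fork, and the tag is stripped before SSZ-unmarshalling. A minimal sketch of that tag-and-detect scheme, with the tag value taken from the keys hunk below:

```go
package example

import "bytes"

// tag mirrors gloasKey = []byte("gloas") from the kv keys file.
var tag = []byte("gloas")

// encode prepends the fork tag to the SSZ-encoded block bytes.
func encode(ssz []byte) []byte {
	return append(append([]byte{}, tag...), ssz...)
}

// hasTag mirrors hasGloasKey above: the value must be strictly longer than the
// tag and start with it.
func hasTag(enc []byte) bool {
	if len(tag) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(tag)], tag)
}

// decode strips the tag, returning the SSZ payload for unmarshalling.
func decode(enc []byte) []byte {
	return enc[len(tag):]
}
```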
|
||||
|
||||
@@ -126,6 +126,7 @@ var Buckets = [][]byte{
|
||||
feeRecipientBucket,
|
||||
registrationBucket,
|
||||
custodyBucket,
|
||||
executionPayloadEnvelopesBucket,
|
||||
}
|
||||
|
||||
// KVStoreOption is a functional option that modifies a kv.Store.
|
||||
|
||||
@@ -7,16 +7,17 @@ package kv
|
||||
// it easy to scan for keys that have a certain shard number as a prefix and return those
|
||||
// corresponding attestations.
|
||||
var (
|
||||
blocksBucket = []byte("blocks")
|
||||
stateBucket = []byte("state")
|
||||
stateSummaryBucket = []byte("state-summary")
|
||||
chainMetadataBucket = []byte("chain-metadata")
|
||||
checkpointBucket = []byte("check-point")
|
||||
powchainBucket = []byte("powchain")
|
||||
stateValidatorsBucket = []byte("state-validators")
|
||||
feeRecipientBucket = []byte("fee-recipient")
|
||||
registrationBucket = []byte("registration")
|
||||
stateDiffBucket = []byte("state-diff")
|
||||
blocksBucket = []byte("blocks")
|
||||
stateBucket = []byte("state")
|
||||
stateSummaryBucket = []byte("state-summary")
|
||||
chainMetadataBucket = []byte("chain-metadata")
|
||||
checkpointBucket = []byte("check-point")
|
||||
powchainBucket = []byte("powchain")
|
||||
stateValidatorsBucket = []byte("state-validators")
|
||||
feeRecipientBucket = []byte("fee-recipient")
|
||||
registrationBucket = []byte("registration")
|
||||
stateDiffBucket = []byte("state-diff")
|
||||
executionPayloadEnvelopesBucket = []byte("execution-payload-envelopes")
|
||||
|
||||
// Light Client Updates Bucket
|
||||
lightClientUpdatesBucket = []byte("light-client-updates")
|
||||
@@ -60,6 +61,8 @@ var (
|
||||
electraBlindKey = []byte("blind-electra")
|
||||
fuluKey = []byte("fulu")
|
||||
fuluBlindKey = []byte("blind-fulu")
|
||||
gloasKey = []byte("gloas")
|
||||
// No gloasBlindKey needed - Gloas blocks are never blinded (no execution payload in block body).
|
||||
|
||||
// block root included in the beacon state used by weak subjectivity initial sync
|
||||
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
|
||||
|
||||
@@ -20,7 +20,6 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -6,7 +6,6 @@ go_library(
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"forkchoice.go",
|
||||
"gloas.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"node.go",
|
||||
@@ -33,7 +32,6 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
@@ -52,7 +50,6 @@ go_test(
|
||||
srcs = [
|
||||
"ffg_update_test.go",
|
||||
"forkchoice_test.go",
|
||||
"gloas_test.go",
|
||||
"no_vote_test.go",
|
||||
"node_test.go",
|
||||
"on_tick_test.go",
|
||||
@@ -72,7 +69,6 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -31,8 +31,7 @@ func New() *ForkChoice {
|
||||
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
|
||||
fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[primitives.ValidatorIndex]bool),
|
||||
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
|
||||
}
|
||||
@@ -44,7 +43,7 @@ func New() *ForkChoice {
|
||||
|
||||
// NodeCount returns the current number of nodes in the Store.
|
||||
func (f *ForkChoice) NodeCount() int {
|
||||
return len(f.store.emptyNodeByRoot)
|
||||
return len(f.store.nodeByRoot)
|
||||
}
|
||||
|
||||
// Head returns the head root from fork choice store.
|
||||
@@ -65,14 +64,14 @@ func (f *ForkChoice) Head(
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
|
||||
}
|
||||
|
||||
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
|
||||
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
|
||||
}
|
||||
|
||||
jc := f.JustifiedCheckpoint()
|
||||
fc := f.FinalizedCheckpoint()
|
||||
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
|
||||
if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
|
||||
}
|
||||
return f.store.head(ctx)
|
||||
@@ -119,14 +118,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jc, fc = f.store.pullTips(state, pn.node, jc, fc)
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
|
||||
_, remErr := f.store.removeNode(ctx, pn)
|
||||
_, remErr := f.store.removeNode(ctx, node)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("Could not remove node")
|
||||
}
|
||||
@@ -149,63 +148,49 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
|
||||
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
|
||||
return nil
|
||||
}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
Epoch: fc.Epoch,
|
||||
Root: bytesutil.ToBytes32(fc.Root),
|
||||
}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
|
||||
Root: bytesutil.ToBytes32(fc.Root)}
|
||||
return f.store.prune(ctx)
|
||||
}
|
||||
|
||||
// HasNode returns true if the node exists in fork choice store,
|
||||
// false otherwise.
|
||||
func (f *ForkChoice) HasNode(root [32]byte) bool {
|
||||
_, ok := f.store.emptyNodeByRoot[root]
|
||||
_, ok := f.store.nodeByRoot[root]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsCanonical returns true if the given root is part of the canonical chain.
|
||||
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
|
||||
// It is fine to pick empty node here since we only check if the beacon block is canonical.
|
||||
pn, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || pn == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if pn.node.bestDescendant == nil {
|
||||
// The node doesn't have any children
|
||||
if node.bestDescendant == nil {
|
||||
if f.store.headNode.bestDescendant == nil {
|
||||
// headNode is itself head.
|
||||
return pn.node == f.store.headNode
|
||||
return node == f.store.headNode
|
||||
}
|
||||
// headNode is not actualized and there are some descendants
|
||||
return pn.node == f.store.headNode.bestDescendant
|
||||
return node == f.store.headNode.bestDescendant
|
||||
}
|
||||
// The node has children
|
||||
if f.store.headNode.bestDescendant == nil {
|
||||
return pn.node.bestDescendant == f.store.headNode
|
||||
return node.bestDescendant == f.store.headNode
|
||||
}
|
||||
return pn.node.bestDescendant == f.store.headNode.bestDescendant
|
||||
return node.bestDescendant == f.store.headNode.bestDescendant
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if the given root has been optimistically synced.
|
||||
// TODO: Gloas, the current implementation uses the result of the full block for
|
||||
// the given root. In gloas this would be incorrect and we should specify the
|
||||
// payload content, thus we should expose a full/empty version of this call.
|
||||
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
if f.store.allTipsAreInvalid {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
en, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return true, ErrNilNode
|
||||
}
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
if fn != nil {
|
||||
return fn.optimistic, nil
|
||||
}
|
||||
|
||||
return en.optimistic, nil
|
||||
return node.optimistic, nil
|
||||
}
|
||||
|
||||
// AncestorRoot returns the ancestor root of input block root at a given slot.
|
||||
@@ -213,21 +198,17 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
pn, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || pn == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
}
|
||||
|
||||
n := pn.node
|
||||
for n.slot > slot {
|
||||
n := node
|
||||
for n != nil && n.slot > slot {
|
||||
if ctx.Err() != nil {
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
if n.parent == nil {
|
||||
n = nil
|
||||
break
|
||||
}
|
||||
n = n.parent.node
|
||||
n = n.parent
|
||||
}
|
||||
|
||||
if n == nil {
|
||||
@@ -240,11 +221,10 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
|
||||
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
|
||||
// known chain in forkchoice.
|
||||
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
pn, ok := f.store.emptyNodeByRoot[cp.Root]
|
||||
if !ok || pn == nil {
|
||||
node, ok := f.store.nodeByRoot[cp.Root]
|
||||
if !ok || node == nil {
|
||||
return false, nil
|
||||
}
|
||||
node := pn.node
|
||||
epochStart, err := slots.EpochStart(cp.Epoch)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -253,13 +233,10 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If it's the start of the epoch, it is a checkpoint
|
||||
if node.slot == epochStart {
|
||||
if len(node.children) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
// If there are no descendants of this beacon block, it is viable as a checkpoint
|
||||
children := f.store.allConsensusChildren(node)
|
||||
if len(children) == 0 {
|
||||
if node.slot == epochStart {
|
||||
return true, nil
|
||||
}
|
||||
if !features.Get().IgnoreUnviableAttestations {
|
||||
@@ -269,8 +246,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
// If some child is after the start of the epoch, the checkpoint is viable.
|
||||
for _, child := range children {
|
||||
for _, child := range node.children {
|
||||
if child.slot > epochStart {
|
||||
return true, nil
|
||||
}
|
||||
@@ -311,7 +287,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
|
||||
// Ignore the vote if the root is not in fork choice
|
||||
// store, that means we have not seen the block before.
|
||||
nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
|
||||
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
|
||||
if ok && vote.nextRoot != zHash {
|
||||
// Protection against nil node
|
||||
if nextNode == nil {
|
||||
@@ -320,7 +296,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
nextNode.balance += newBalance
|
||||
}
|
||||
|
||||
currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
|
||||
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
|
||||
if ok && vote.currentRoot != zHash {
|
||||
// Protection against nil node
|
||||
if currentNode == nil {
|
||||
@@ -361,13 +337,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
|
||||
return f.store.proposerBoost()
|
||||
}
|
||||
|
||||
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
|
||||
// SetOptimisticToValid sets the node with the given root as a fully validated node
|
||||
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
|
||||
fn, ok := f.store.fullNodeByRoot[root]
|
||||
if !ok || fn == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set node to valid")
|
||||
}
|
||||
return f.store.setNodeAndParentValidated(ctx, fn)
|
||||
return node.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpoint of fork choice store.
|
||||
@@ -386,8 +362,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
}
|
||||
|
||||
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
|
||||
}
|
||||
|
||||
// InsertSlashedIndex adds the given slashed validator index to the
|
||||
@@ -410,7 +386,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
|
||||
return
|
||||
}
|
||||
|
||||
node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
|
||||
node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
|
||||
if !ok || node == nil {
|
||||
return
|
||||
}
|
||||
@@ -445,30 +421,22 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
|
||||
}
|
||||
|
||||
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
|
||||
// This is payload aware. Consider the following situation
|
||||
// [A,full] <--- [B, full] <---[C,pending]
|
||||
//
|
||||
// \---------[B, empty] <--[D, pending]
|
||||
//
|
||||
// Then even though C and D both descend from the beacon block B, their common ancestor is A.
|
||||
// Notice that also this function **requires** that the two roots are actually contending blocks! otherwise the
|
||||
// behavior is not defined.
|
||||
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
en1, ok := f.store.emptyNodeByRoot[r1]
|
||||
if !ok || en1 == nil {
|
||||
n1, ok := f.store.nodeByRoot[r1]
|
||||
if !ok || n1 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
|
||||
// Do nothing if the input roots are the same.
|
||||
if r1 == r2 {
|
||||
return r1, en1.node.slot, nil
|
||||
return r1, n1.slot, nil
|
||||
}
|
||||
|
||||
en2, ok := f.store.emptyNodeByRoot[r2]
|
||||
if !ok || en2 == nil {
|
||||
n2, ok := f.store.nodeByRoot[r2]
|
||||
if !ok || n2 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
|
||||
@@ -476,23 +444,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
|
||||
if ctx.Err() != nil {
|
||||
return [32]byte{}, 0, ctx.Err()
|
||||
}
|
||||
if en1.node.slot > en2.node.slot {
|
||||
en1 = en1.node.parent
|
||||
if n1.slot > n2.slot {
|
||||
n1 = n1.parent
|
||||
// Reaches the end of the tree and unable to find common ancestor.
|
||||
// This should not happen at runtime as the finalized
|
||||
// node has to be a common ancestor
|
||||
if en1 == nil {
|
||||
if n1 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
} else {
|
||||
en2 = en2.node.parent
|
||||
n2 = n2.parent
|
||||
// Reaches the end of the tree and unable to find common ancestor.
|
||||
if en2 == nil {
|
||||
if n2 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
}
|
||||
if en1 == en2 {
|
||||
return en1.node.root, en1.node.slot, nil
|
||||
if n1 == n2 {
|
||||
return n1.root, n1.slot, nil
|
||||
}
|
||||
}
|
||||
}
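After this change, CommonAncestor walks plain parent pointers: repeatedly step back whichever of the two nodes sits at the higher slot until the pointers meet, or a nil parent signals there is no common ancestor below the tree root. A self-contained sketch of that walk on a hypothetical minimal node type:

```go
package example

import "errors"

// node is a hypothetical minimal version of the fork-choice node used above.
type node struct {
	root   [32]byte
	slot   uint64
	parent *node
}

var errUnknownCommonAncestor = errors.New("unknown common ancestor")

// commonAncestor steps the deeper node toward the root until both pointers
// coincide, mirroring the loop in CommonAncestor above.
func commonAncestor(n1, n2 *node) (*node, error) {
	for n1 != nil && n2 != nil {
		if n1 == n2 {
			return n1, nil
		}
		if n1.slot > n2.slot {
			n1 = n1.parent
		} else {
			n2 = n2.parent
		}
	}
	return nil, errUnknownCommonAncestor
}
```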
|
||||
@@ -539,17 +507,35 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
|
||||
|
||||
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
|
||||
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
|
||||
return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
|
||||
root := f.FinalizedCheckpoint().Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
|
||||
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
|
||||
root := f.JustifiedCheckpoint().Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
|
||||
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
|
||||
root := f.store.unrealizedJustifiedCheckpoint.Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// ForkChoiceDump returns a full dump of forkchoice.
|
||||
@@ -573,7 +559,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
|
||||
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
|
||||
var err error
|
||||
if f.store.treeRootNode != nil {
|
||||
nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
|
||||
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -602,7 +588,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
|
||||
|
||||
// Weight returns the weight of the given root if found on the store
|
||||
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
@@ -630,11 +616,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
|
||||
|
||||
// Slot returns the slot of the given root if it's known to forkchoice
|
||||
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
return n.node.slot, nil
|
||||
return n.slot, nil
|
||||
}
|
||||
|
||||
// DependentRoot returns the last root of the epoch prior to the requested ecoch in the canonical chain.
|
||||
@@ -642,7 +628,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
|
||||
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
|
||||
}
|
||||
|
||||
// DependentRootForEpoch return the last root of the epoch prior to the requested epoch for the given root.
|
||||
// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
|
||||
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
tr, err := f.TargetRootForEpoch(root, epoch)
|
||||
if err != nil {
|
||||
@@ -651,18 +637,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
if tr == [32]byte{} {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
en, ok := f.store.emptyNodeByRoot[tr]
|
||||
if !ok || en == nil {
|
||||
node, ok := f.store.nodeByRoot[tr]
|
||||
if !ok || node == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
if slots.ToEpoch(en.node.slot) >= epoch {
|
||||
if en.node.parent != nil {
|
||||
en = en.node.parent
|
||||
if slots.ToEpoch(node.slot) >= epoch {
|
||||
if node.parent != nil {
|
||||
node = node.parent
|
||||
} else {
|
||||
return f.store.finalizedDependentRoot, nil
|
||||
}
|
||||
}
|
||||
return en.node.root, nil
|
||||
return node.root, nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch returns the root of the target block for a given epoch.
|
||||
@@ -674,48 +660,46 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
// which case we return the root of the checkpoint of the chain containing the
|
||||
// passed root, at the given epoch
|
||||
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
node := n.node
|
||||
nodeEpoch := slots.ToEpoch(node.slot)
|
||||
nodeEpoch := slots.ToEpoch(n.slot)
|
||||
if epoch > nodeEpoch {
|
||||
return node.root, nil
|
||||
return n.root, nil
|
||||
}
|
||||
if node.target == nil {
|
||||
if n.target == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
targetRoot := node.target.root
|
||||
targetRoot := n.target.root
|
||||
if epoch == nodeEpoch {
|
||||
return targetRoot, nil
|
||||
}
|
||||
targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
|
||||
targetNode, ok := f.store.nodeByRoot[targetRoot]
|
||||
if !ok || targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// If slot 0 was not missed we consider a previous block to go back at least one epoch
|
||||
if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
|
||||
targetNode = targetNode.node.parent
|
||||
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
|
||||
targetNode = targetNode.parent
|
||||
if targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
}
|
||||
return f.TargetRootForEpoch(targetNode.node.root, epoch)
|
||||
return f.TargetRootForEpoch(targetNode.root, epoch)
|
||||
}
|
||||
|
||||
// ParentRoot returns the block root of the parent node if it is in forkchoice.
|
||||
// The exception is for the finalized checkpoint root which we return the zero
|
||||
// hash.
|
||||
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// Return the zero hash for the tree root
|
||||
parent := n.node.parent
|
||||
if parent == nil {
|
||||
if n.parent == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
return parent.node.root, nil
|
||||
return n.parent.root, nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package doublylinkedtree
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice"
|
||||
@@ -103,9 +104,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
s := f.store
|
||||
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
@@ -121,9 +122,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{100, 100, 100}
|
||||
f.votes = []Vote{
|
||||
@@ -134,9 +135,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
@@ -152,9 +153,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{125, 125, 125}
|
||||
f.votes = []Vote{
|
||||
@@ -165,9 +166,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
@@ -223,12 +224,12 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
|
||||
require.NoError(t, f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode))
|
||||
require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].node.weight)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].node.weight)
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
|
||||
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
|
||||
|
||||
require.NoError(t, f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, 1, 1, 1))
|
||||
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
|
||||
|
||||
r1 := [32]byte{'1'}
|
||||
@@ -259,7 +260,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)].node
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
|
||||
@@ -341,21 +342,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
|
||||
// Process b's slashing, c is now head
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
f.justifiedBalances = []uint64{100, 200, 200, 300}
|
||||
head, err = f.Head(ctx)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
// Process b's slashing again, should be a noop
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
f.justifiedBalances = []uint64{100, 200, 200, 300}
|
||||
head, err = f.Head(ctx)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
@@ -513,6 +514,58 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// a -- b -- c -- d
|
||||
f = setup(0, 0)
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
tests = []struct {
|
||||
name string
|
||||
r1 [32]byte
|
||||
r2 [32]byte
|
||||
wantRoot [32]byte
|
||||
wantSlot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Common ancestor between a and b is a",
|
||||
r1: [32]byte{'a'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
wantSlot: 0,
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between b and d is b",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'b'},
|
||||
wantSlot: 1,
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between d and a is a",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'a'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
wantSlot: 0,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wantRoot, gotRoot)
|
||||
require.Equal(t, tc.wantSlot, gotSlot)
|
||||
})
|
||||
}
|
||||
|
||||
// Equal inputs should return the same root.
|
||||
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
@@ -535,9 +588,10 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
unrealizedJustifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
unrealizedFinalizedEpoch: 1,
|
||||
optimistic: true,
|
||||
}
|
||||
|
||||
f.store.emptyNodeByRoot[[32]byte{'y'}] = &PayloadNode{node: n, optimistic: true}
|
||||
f.store.nodeByRoot[[32]byte{'y'}] = n
|
||||
// broken link
|
||||
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
@@ -556,8 +610,7 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
|
||||
Block: roblock,
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
@@ -572,8 +625,7 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
|
||||
Block: roblock,
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
@@ -690,7 +742,7 @@ func TestWeight(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
require.Equal(t, true, ok)
|
||||
n.weight = 10
|
||||
w, err := f.Weight(root)
|
||||
@@ -862,3 +914,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, zeroHash, root)
|
||||
}
|
||||
|
||||
func TestForkChoice_CleanupInserting(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
|
||||
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
|
||||
return f.justifiedBalances, errors.New("mock err")
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, f.InsertNode(ctx, st, roblock))
|
||||
require.Equal(t, false, f.HasNode(roblock.Root()))
|
||||
}
|
||||
|
||||
@@ -1,327 +0,0 @@
package doublylinkedtree

import (
"bytes"
"context"
"slices"
"time"

"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)

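// resolveParentPayloadStatus derives the bid's block hash for the incoming block and selects the
// parent payload node it builds on: it starts from the empty node for the block's parent root and
// switches to the full node when the bid's parent block hash matches that parent's payload hash.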
func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
sb, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
return err
}
wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
if err != nil {
return errors.Wrap(err, "failed to wrap signed bid")
}
bid, err := wb.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid from wrapped bid")
}
*blockHash = bid.BlockHash()
parentRoot := block.ParentRoot()
*parent = s.emptyNodeByRoot[parentRoot]
if *parent == nil {
// This is the tree root node.
return nil
}
if bid.ParentBlockHash() == (*parent).node.blockHash {
// block builds on full
*parent = s.fullNodeByRoot[(*parent).node.root]
}
return nil
}

// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight := en.weight
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += fn.weight
|
||||
}
|
||||
if n.root == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += child.weight
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// allConsensusChildren returns the list of all consensus blocks that build on the given node.
|
||||
func (s *Store) allConsensusChildren(n *Node) []*Node {
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
fn, ok := s.fullNodeByRoot[n.root]
|
||||
if ok {
|
||||
return append(slices.Clone(en.children), fn.children...)
|
||||
}
|
||||
return en.children
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
if ctx.Err() != nil {
return ctx.Err()
}

if !pn.optimistic {
return nil
}
pn.optimistic = false
if pn.full {
// also mark the empty node as valid
en := s.emptyNodeByRoot[pn.node.root]
en.optimistic = false
}
if pn.node.parent == nil {
return nil
}
return s.setNodeAndParentValidated(ctx, pn.node.parent)
}

// fullAncestor returns the highest ancestor with a full payload that a block with the
// given root has. If there is a payload for the passed root, then it returns that full
// node. Otherwise it returns the latest full node in the ancestry of the given root.
func (s *Store) fullAncestor(root [32]byte) *PayloadNode {
fn, ok := s.fullNodeByRoot[root]
if ok {
return fn
}
en := s.emptyNodeByRoot[root]
if en == nil {
return nil
}
return s.fullParent(en)
}

// fullParent returns the latest full node that this block builds on.
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
parent := pn.node.parent
for ; parent != nil && !parent.full; parent = parent.node.parent {
}
return parent
}

// parentHash returns the payload hash of the latest full node that this block builds on.
func (s *Store) parentHash(pn *PayloadNode) [32]byte {
fullParent := s.fullParent(pn)
if fullParent == nil {
return [32]byte{}
}
return fullParent.node.blockHash
}

// latestHashForRoot returns the latest payload hash for the given block root.
func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
// try to get the full node first
fn, ok := s.fullNodeByRoot[root]
if ok && fn != nil {
return fn.node.blockHash
}
en := s.emptyNodeByRoot[root]
if !ok || en == nil {
// This should not happen
return [32]byte{}
}
return s.parentHash(en)
}

// updateBestDescendantPayloadNode updates the best descendant of this node and its
|
||||
// children.
|
||||
func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
var bestChild *Node
|
||||
bestWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && bestChild == nil {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
bestWeight = child.weight
|
||||
bestChild = child
|
||||
} else if childLeadsToViableHead {
|
||||
// If both are viable, compare their weights.
|
||||
if child.weight == bestWeight {
|
||||
// Tie-breaker of equal weights by root.
|
||||
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
|
||||
bestChild = child
|
||||
}
|
||||
} else if child.weight > bestWeight {
|
||||
bestChild = child
|
||||
bestWeight = child.weight
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestChild == nil {
|
||||
n.bestDescendant = nil
|
||||
} else {
|
||||
if bestChild.bestDescendant == nil {
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBestDescendantConsensusNode updates the best descendant of this node and its
|
||||
// children.
|
||||
func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(s.allConsensusChildren(n)) == 0 {
|
||||
n.bestDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn == nil {
|
||||
n.bestDescendant = en.bestDescendant
|
||||
return nil
|
||||
}
|
||||
// TODO GLOAS: pick between full or empty
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
n.bestDescendant = fn.bestDescendant
|
||||
return nil
|
||||
}
|
||||
|
||||
// choosePayloadContent chooses between empty or full for the passed consensus node. TODO Gloas: use PTC to choose.
|
||||
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
return fn
|
||||
}
|
||||
return s.emptyNodeByRoot[n.root]
|
||||
}
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
var parentRoot [32]byte
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.node.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
optimistic := false
|
||||
if n.parent != nil {
|
||||
optimistic = n.parent.optimistic
|
||||
}
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
timestamp := en.timestamp
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
optimistic = fn.optimistic
|
||||
timestamp = fn.timestamp
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: optimistic,
|
||||
ExecutionBlockHash: n.blockHash[:],
|
||||
Timestamp: timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
} else {
|
||||
thisNode.Validity = forkchoice2.Valid
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
children := s.allConsensusChildren(n)
|
||||
for _, child := range children {
|
||||
nodes, err = s.nodeTreeDump(ctx, child, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
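// InsertPayload inserts the execution payload envelope for a block whose empty node is already in
// forkchoice. It creates a full PayloadNode that shares the same underlying consensus Node as the
// empty one, marked optimistic, and is a no-op if a payload for this root was already inserted.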
func (f *ForkChoice) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
s := f.store
root := pe.BeaconBlockRoot()
en := s.emptyNodeByRoot[root]
if en == nil {
return errors.Wrap(ErrNilNode, "cannot insert full node without an empty one")
}
if _, ok := s.fullNodeByRoot[root]; ok {
// We don't import two payloads for the same root
return nil
}
fn := &PayloadNode{
node: en.node,
optimistic: true,
timestamp: time.Now(),
full: true,
children: make([]*Node, 0),
}
s.fullNodeByRoot[root] = fn
return nil
}
@@ -1,300 +0,0 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
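// prepareGloasForkchoiceState builds a minimal Gloas beacon state and a matching ROBlock carrying a
// signed execution payload bid, so tests can exercise forkchoice insertion of Gloas blocks.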
func prepareGloasForkchoiceState(
|
||||
_ context.Context,
|
||||
slot primitives.Slot,
|
||||
blockRoot [32]byte,
|
||||
parentRoot [32]byte,
|
||||
blockHash [32]byte,
|
||||
parentBlockHash [32]byte,
|
||||
justifiedEpoch primitives.Epoch,
|
||||
finalizedEpoch primitives.Epoch,
|
||||
) (state.BeaconState, blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
|
||||
justifiedCheckpoint := ðpb.Checkpoint{
|
||||
Epoch: justifiedEpoch,
|
||||
}
|
||||
|
||||
finalizedCheckpoint := ðpb.Checkpoint{
|
||||
Epoch: finalizedEpoch,
|
||||
}
|
||||
|
||||
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
|
||||
for i := range builderPendingPayments {
|
||||
builderPendingPayments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
base := ðpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentJustifiedCheckpoint: justifiedCheckpoint,
|
||||
FinalizedCheckpoint: finalizedCheckpoint,
|
||||
LatestBlockHeader: blockHeader,
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: blockHash[:],
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitments: [][]byte{make([]byte, 48)},
|
||||
},
|
||||
Builders: make([]*ethpb.Builder, 0),
|
||||
BuilderPendingPayments: builderPendingPayments,
|
||||
ExecutionPayloadAvailability: make([]byte, 1024),
|
||||
LatestBlockHash: make([]byte, 32),
|
||||
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
ProposerLookahead: make([]uint64, 64),
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: blockHash[:],
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
},
|
||||
})
|
||||
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
SignedExecutionPayloadBid: bid,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
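// prepareGloasForkchoicePayload wraps a bare execution payload envelope for the given beacon block
// root, for use with InsertPayload in these tests.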
func prepareGloasForkchoicePayload(
|
||||
blockRoot [32]byte,
|
||||
) (interfaces.ROExecutionPayloadEnvelope, error) {
|
||||
env := ðpb.ExecutionPayloadEnvelope{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Payload: &enginev1.ExecutionPayloadDeneb{},
|
||||
}
|
||||
return blocks.WrappedROExecutionPayloadEnvelope(env)
|
||||
}
|
||||
|
||||
func TestInsertGloasBlock_EmptyNodeOnly(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(1)
|
||||
blockHash := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Empty node should exist.
|
||||
en := f.store.emptyNodeByRoot[root]
|
||||
require.NotNil(t, en)
|
||||
|
||||
// Full node should NOT exist.
|
||||
_, hasFull := f.store.fullNodeByRoot[root]
|
||||
assert.Equal(t, false, hasFull)
|
||||
|
||||
// Parent should be the genesis full node.
|
||||
genesisRoot := params.BeaconConfig().ZeroHash
|
||||
genesisFull := f.store.fullNodeByRoot[genesisRoot]
|
||||
require.NotNil(t, genesisFull)
|
||||
assert.Equal(t, genesisFull, en.node.parent)
|
||||
}
|
||||
|
||||
func TestInsertPayload_CreatesFullNode(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(1)
|
||||
blockHash := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
require.Equal(t, 2, len(f.store.emptyNodeByRoot))
|
||||
require.Equal(t, 1, len(f.store.fullNodeByRoot))
|
||||
|
||||
pe, err := prepareGloasForkchoicePayload(root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot))
|
||||
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
require.NotNil(t, fn)
|
||||
|
||||
en := f.store.emptyNodeByRoot[root]
|
||||
require.NotNil(t, en)
|
||||
|
||||
// Empty and full share the same *Node.
|
||||
assert.Equal(t, en.node, fn.node)
|
||||
assert.Equal(t, true, fn.optimistic)
|
||||
assert.Equal(t, true, fn.full)
|
||||
}
|
||||
|
||||
func TestInsertPayload_DuplicateIsNoop(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(1)
|
||||
blockHash := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
pe, err := prepareGloasForkchoicePayload(root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot))
|
||||
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
require.NotNil(t, fn)
|
||||
|
||||
// Insert again — should be a no-op.
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
assert.Equal(t, fn, f.store.fullNodeByRoot[root])
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot))
|
||||
}
|
||||
|
||||
func TestInsertPayload_WithoutEmptyNode_Errors(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(99)
|
||||
pe, err := prepareGloasForkchoicePayload(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.InsertPayload(ctx, pe)
|
||||
require.ErrorContains(t, ErrNilNode.Error(), err)
|
||||
}
|
||||
|
||||
func TestGloasBlock_ChildBuildsOnEmpty(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
// Insert Gloas block A (empty only).
|
||||
rootA := indexToHash(1)
|
||||
blockHashA := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Insert Gloas block B as child of (A, empty)
|
||||
rootB := indexToHash(2)
|
||||
blockHashB := indexToHash(200)
|
||||
nonMatchingParentHash := indexToHash(999)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, nonMatchingParentHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
emptyA := f.store.emptyNodeByRoot[rootA]
|
||||
require.NotNil(t, emptyA)
|
||||
nodeB := f.store.emptyNodeByRoot[rootB]
|
||||
require.NotNil(t, nodeB)
|
||||
require.Equal(t, emptyA, nodeB.node.parent)
|
||||
}
|
||||
|
||||
func TestGloasBlock_ChildrenOfEmptyAndFull(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
// Insert Gloas block A (empty only).
|
||||
rootA := indexToHash(1)
|
||||
blockHashA := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
// Insert payload for A
|
||||
pe, err := prepareGloasForkchoicePayload(rootA)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
|
||||
// Insert Gloas block B as child of (A, empty)
|
||||
rootB := indexToHash(2)
|
||||
blockHashB := indexToHash(200)
|
||||
nonMatchingParentHash := indexToHash(999)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, nonMatchingParentHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Insert Gloas block C as child of (A, full)
|
||||
rootC := indexToHash(3)
|
||||
blockHashC := indexToHash(201)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 3, rootC, rootA, blockHashC, blockHashA, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
emptyA := f.store.emptyNodeByRoot[rootA]
|
||||
require.NotNil(t, emptyA)
|
||||
nodeB := f.store.emptyNodeByRoot[rootB]
|
||||
require.NotNil(t, nodeB)
|
||||
require.Equal(t, emptyA, nodeB.node.parent)
|
||||
nodeC := f.store.emptyNodeByRoot[rootC]
|
||||
require.NotNil(t, nodeC)
|
||||
fullA := f.store.fullNodeByRoot[rootA]
|
||||
require.NotNil(t, fullA)
|
||||
require.Equal(t, fullA, nodeC.node.parent)
|
||||
}
|
||||
|
||||
func TestGloasBlock_ChildBuildsOnFull(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
// Insert Gloas block A (empty only).
|
||||
rootA := indexToHash(1)
|
||||
blockHashA := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Insert payload for A → creates the full node.
|
||||
pe, err := prepareGloasForkchoicePayload(rootA)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
|
||||
fullA := f.store.fullNodeByRoot[rootA]
|
||||
require.NotNil(t, fullA)
|
||||
|
||||
// Child for (A, full)
|
||||
rootB := indexToHash(2)
|
||||
blockHashB := indexToHash(200)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, blockHashA, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
nodeB := f.store.emptyNodeByRoot[rootB]
|
||||
require.NotNil(t, nodeB)
|
||||
assert.Equal(t, fullA, nodeB.node.parent)
|
||||
}
|
||||
@@ -1,17 +1,95 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessAttestationsThreshold is the amount of time after which we
|
||||
// process attestations for the current slot
|
||||
const ProcessAttestationsThreshold = 10 * time.Second
|
||||
|
||||
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if err := child.applyWeightChanges(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += child.weight
|
||||
}
|
||||
if n.root == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBestDescendant updates the best descendant of this node and its
|
||||
// children.
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.bestDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var bestChild *Node
|
||||
bestWeight := uint64(0)
|
||||
hasViableDescendant := false
|
||||
for _, child := range n.children {
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && !hasViableDescendant {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
bestWeight = child.weight
|
||||
bestChild = child
|
||||
hasViableDescendant = true
|
||||
} else if childLeadsToViableHead {
|
||||
// If both are viable, compare their weights.
|
||||
if child.weight == bestWeight {
|
||||
// Tie-breaker of equal weights by root.
|
||||
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
|
||||
bestChild = child
|
||||
}
|
||||
} else if child.weight > bestWeight {
|
||||
bestChild = child
|
||||
bestWeight = child.weight
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasViableDescendant {
|
||||
if bestChild.bestDescendant == nil {
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
} else {
|
||||
n.bestDescendant = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// viableForHead returns true if the node is viable to head.
|
||||
// Any node with different finalized or justified epoch than
|
||||
// the ones in fork choice store should not be viable to head.
|
||||
@@ -32,13 +110,30 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
|
||||
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
|
||||
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if !n.optimistic {
|
||||
return nil
|
||||
}
|
||||
n.optimistic = false
|
||||
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
return n.parent.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
// arrivedEarly returns whether this node was inserted before the first
|
||||
// threshold to orphan a block.
|
||||
// Note that genesisTime has seconds granularity, therefore we use a strict
|
||||
// inequality < here. For example a block that arrives 3.9999 seconds into the
|
||||
// slot will have secs = 3 below.
|
||||
func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
|
||||
return sss < votingWindow, err
|
||||
}
|
||||
@@ -48,7 +143,52 @@ func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
// Note that genesisTime has seconds granularity, therefore we use an
|
||||
// inequality >= here. For example a block that arrives 10.00001 seconds into the
|
||||
// slot will have secs = 10 below.
|
||||
func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
return secs >= ProcessAttestationsThreshold, err
|
||||
}
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
var parentRoot [32]byte
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: n.optimistic,
|
||||
ExecutionBlockHash: n.payloadHash[:],
|
||||
Timestamp: n.timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if n.optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
} else {
|
||||
thisNode.Validity = forkchoice2.Valid
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
for _, child := range n.children {
|
||||
nodes, err = child.nodeTreeDump(ctx, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
|
||||
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
|
||||
|
||||
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
|
||||
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
|
||||
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
|
||||
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
|
||||
}
|
||||
|
||||
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(3)].weight = 400
|
||||
s.nodeByRoot[indexToHash(1)].weight = 400
|
||||
s.nodeByRoot[indexToHash(2)].weight = 400
|
||||
s.nodeByRoot[indexToHash(3)].weight = 400
|
||||
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
|
||||
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
|
||||
|
||||
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
|
||||
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
|
||||
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
|
||||
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
@@ -78,7 +78,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
|
||||
// Verify parent's best child and best descendant are `none`.
|
||||
s := f.store
|
||||
assert.Equal(t, 1, len(s.allConsensusChildren(s.treeRootNode)))
|
||||
assert.Equal(t, 1, len(s.treeRootNode.children))
|
||||
nilBestDescendant := s.treeRootNode.bestDescendant == nil
|
||||
assert.Equal(t, true, nilBestDescendant)
|
||||
}
|
||||
@@ -92,9 +92,8 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
children := s.allConsensusChildren(s.treeRootNode)
|
||||
assert.Equal(t, 1, len(children))
|
||||
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
|
||||
assert.Equal(t, 1, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
@@ -109,34 +108,32 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 200
|
||||
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
|
||||
s.nodeByRoot[indexToHash(1)].weight = 100
|
||||
s.nodeByRoot[indexToHash(2)].weight = 200
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
children := s.allConsensusChildren(s.treeRootNode)
|
||||
assert.Equal(t, 2, len(children))
|
||||
assert.Equal(t, children[1], s.treeRootNode.bestDescendant)
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := t.Context()
|
||||
// Input child is the best descendant
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, indexToHash(102), 1, 1)
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].node.weight = 200
|
||||
s.emptyNodeByRoot[indexToHash(2)].node.weight = 100
|
||||
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
|
||||
s.nodeByRoot[indexToHash(1)].weight = 200
|
||||
s.nodeByRoot[indexToHash(2)].weight = 100
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
children := s.allConsensusChildren(s.treeRootNode)
|
||||
assert.Equal(t, 2, len(children))
|
||||
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_ViableForHead(t *testing.T) {
|
||||
@@ -179,44 +176,44 @@ func TestNode_LeadsToViableHead(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].node.leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].node.leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].node.leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
|
||||
}
|
||||
|
||||
func TestNode_SetFullyValidated(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := t.Context()
|
||||
storeNodes := make([]*PayloadNode, 6)
|
||||
storeNodes[0] = f.store.fullNodeByRoot[params.BeaconConfig().ZeroHash]
|
||||
storeNodes := make([]*Node, 6)
|
||||
storeNodes[0] = f.store.treeRootNode
|
||||
// insert blocks in the fork pattern (optimistic status in parenthesis)
|
||||
//
|
||||
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
|
||||
// \
|
||||
// -- 5 (true)
|
||||
//
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[1] = f.store.fullNodeByRoot[blk.Root()]
|
||||
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), indexToHash(102), 1, 1)
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[2] = f.store.fullNodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(2)))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), indexToHash(103), 1, 1)
|
||||
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[3] = f.store.fullNodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), indexToHash(104), 1, 1)
|
||||
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[4] = f.store.fullNodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), indexToHash(105), 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[5] = f.store.fullNodeByRoot[blk.Root()]
|
||||
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
|
||||
|
||||
opt, err := f.IsOptimistic(indexToHash(5))
|
||||
require.NoError(t, err)
|
||||
@@ -226,7 +223,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
require.NoError(t, f.store.setNodeAndParentValidated(ctx, f.store.fullNodeByRoot[indexToHash(4)]))
|
||||
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
|
||||
|
||||
// block 5 should still be optimistic
|
||||
opt, err = f.IsOptimistic(indexToHash(5))
|
||||
@@ -243,20 +240,20 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
require.Equal(t, false, opt)
|
||||
|
||||
respNodes := make([]*forkchoice.Node, 0)
|
||||
respNodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, respNodes)
|
||||
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(respNodes), f.NodeCount())
|
||||
|
||||
for i, respNode := range respNodes {
|
||||
require.Equal(t, storeNodes[i].node.slot, respNode.Slot)
|
||||
require.DeepEqual(t, storeNodes[i].node.root[:], respNode.BlockRoot)
|
||||
require.Equal(t, storeNodes[i].node.balance, respNode.Balance)
|
||||
require.Equal(t, storeNodes[i].node.weight, respNode.Weight)
|
||||
require.Equal(t, storeNodes[i].slot, respNode.Slot)
|
||||
require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
|
||||
require.Equal(t, storeNodes[i].balance, respNode.Balance)
|
||||
require.Equal(t, storeNodes[i].weight, respNode.Weight)
|
||||
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
|
||||
require.Equal(t, storeNodes[i].node.justifiedEpoch, respNode.JustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].node.unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].node.finalizedEpoch, respNode.FinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].node.unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
|
||||
}
|
||||
}
|
||||
@@ -275,10 +272,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err := f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err := f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, early)
|
||||
late, err := f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, late)
|
||||
|
||||
@@ -292,10 +289,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, early)
|
||||
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, late)
|
||||
|
||||
@@ -308,10 +305,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, early)
|
||||
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, late)
|
||||
|
||||
@@ -323,10 +320,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.ErrorContains(t, "invalid timestamp", err)
|
||||
require.Equal(t, true, early)
|
||||
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.ErrorContains(t, "invalid timestamp", err)
|
||||
require.Equal(t, false, late)
|
||||
}
|
||||
|
||||
@@ -7,141 +7,92 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
n := s.fullNodeByRoot[root]
|
||||
if n == nil {
|
||||
// The offending node with its payload is not in forkchoice. Try with the parent
|
||||
n = s.emptyNodeByRoot[parentRoot]
|
||||
if n == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok {
|
||||
node, ok = s.nodeByRoot[parentRoot]
|
||||
if !ok || node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
|
||||
}
|
||||
if n.node.blockHash == lastValidHash {
|
||||
// The parent node must have been full and with a valid payload
|
||||
// return early if the parent is LVH
|
||||
if node.payloadHash == lastValidHash {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
if n.node.blockHash == parentHash {
|
||||
// The parent was full and invalid
|
||||
n = s.fullNodeByRoot[parentRoot]
|
||||
if n == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
|
||||
}
|
||||
} else {
|
||||
// The parent is empty and we don't yet know if it's valid or not
|
||||
for n = n.node.parent; n != nil; n = n.node.parent {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
if n.node.blockHash == lastValidHash {
|
||||
// The node built on empty and the whole chain was valid
|
||||
return invalidRoots, nil
|
||||
}
|
||||
if n.node.blockHash == parentHash {
|
||||
// The parent was full and invalid
|
||||
break
|
||||
}
|
||||
}
|
||||
if n == nil {
|
||||
return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// check consistency with the parent information
|
||||
if n.node.parent == nil {
|
||||
return nil, ErrNilNode
|
||||
if node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
|
||||
}
|
||||
if n.node.parent.node.root != parentRoot {
|
||||
return nil, errInvalidParentRoot
|
||||
if node.parent.root != parentRoot {
|
||||
return invalidRoots, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
// n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
|
||||
startNode := n
|
||||
fp := s.fullParent(n)
|
||||
for ; fp != nil && fp.node.blockHash != lastValidHash; fp = s.fullParent(fp) {
|
||||
firstInvalid := node
|
||||
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
n = fp
|
||||
}
|
||||
// Deal with the case that the last valid payload is in a different fork
|
||||
// This means we are dealing with an EE that does not follow the spec
|
||||
if fp == nil {
|
||||
if firstInvalid.parent == nil {
|
||||
// return early if the invalid node was not imported
|
||||
if startNode.node.root != root {
|
||||
if node.root == parentRoot {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
// Remove just the imported invalid root
|
||||
n = startNode
|
||||
firstInvalid = node
|
||||
}
|
||||
return s.removeNode(ctx, n)
|
||||
return s.removeNode(ctx, firstInvalid)
|
||||
}
|
||||
|
||||
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)

if pn == nil {
if node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
}
if !pn.optimistic || pn.node.parent == nil {
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := pn.node.parent.children

children := node.parent.children
if len(children) == 1 {
pn.node.parent.children = []*Node{}
node.parent.children = []*Node{}
} else {
for i, n := range children {
if n == pn.node {
if n == node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
pn.node.parent.children = children[:len(children)-1]
node.parent.children = children[:len(children)-1]
break
}
}
}
return s.removeNodeAndChildren(ctx, pn, invalidRoots)
return s.removeNodeAndChildren(ctx, node, invalidRoots)
}

// removeNodeAndChildren removes `node` and all of its descendants from the Store
func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
var err error
// If we are removing an empty node, then remove the full node as well if it exists.
if !pn.full {
fn, ok := s.fullNodeByRoot[pn.node.root]
if ok {
invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
if err != nil {
return invalidRoots, err
}
}
}
// Now we remove the full node's children.
for _, child := range pn.children {
for _, child := range node.children {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
// We need to remove only the empty node here since the recursion will take care of the full one.
en := s.emptyNodeByRoot[child.root]
if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
return invalidRoots, err
}
}
// Only append the root for the empty nodes.
if pn.full {
delete(s.fullNodeByRoot, pn.node.root)
} else {
invalidRoots = append(invalidRoots, pn.node.root)
if pn.node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if pn.node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.emptyNodeByRoot, pn.node.root)
invalidRoots = append(invalidRoots, node.root)
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
return invalidRoots, nil
}

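Aside: the child-deletion loop in removeNode above relies on a swap-with-last idiom rather than re-slicing around the removed element. A minimal, self-contained sketch of that pattern follows; the childList type is hypothetical and only stands in for the parent's []*Node children.

package main

import "fmt"

// childList mirrors the removal used in removeNode: the element to delete is
// overwritten with the final element and the slice is truncated by one.
type childList []string

func (c childList) removeAt(i int) childList {
	if i != len(c)-1 {
		c[i] = c[len(c)-1]
	}
	return c[:len(c)-1]
}

func main() {
	children := childList{"a", "b", "c", "d"}
	// Remove "b" (index 1): "d" takes its slot and the slice shrinks.
	children = children.removeAt(1)
	fmt.Println(children) // [a d c]
}

Order is not preserved, which suits the forkchoice use: children behave as an unordered set, and the swap avoids shifting the tail of the slice.
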
@@ -23,35 +23,93 @@ import (
|
||||
// And every block in the Fork choice is optimistic.
|
||||
func TestPruneInvalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
root [32]byte // the root of the new INVALID block
|
||||
parentRoot [32]byte // the root of the parent block
|
||||
parentHash [32]byte // the execution hash of the parent block
|
||||
lastValidHash [32]byte // the last valid execution hash
|
||||
payload [32]byte // the last valid hash
|
||||
wantedNodeNumber int
|
||||
wantedRoots [][32]byte
|
||||
wantedErr error
|
||||
}{
|
||||
{ // Bogus LVH, root not in forkchoice
|
||||
name: "bogus LVH not in forkchoice",
|
||||
root: [32]byte{'x'}, parentRoot: [32]byte{'i'}, parentHash: [32]byte{'I'}, lastValidHash: [32]byte{'R'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
},
|
||||
{ // Bogus LVH
|
||||
name: "bogus LVH",
|
||||
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'R'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
[32]byte{'x'},
|
||||
[32]byte{'i'},
|
||||
[32]byte{'R'},
|
||||
13,
|
||||
[][32]byte{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted j",
|
||||
root: [32]byte{'j'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
// Bogus LVH
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'R'},
|
||||
12,
|
||||
[][32]byte{{'i'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted 5",
|
||||
root: [32]byte{'c'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
|
||||
wantedNodeNumber: 5,
|
||||
wantedRoots: [][32]byte{
|
||||
[32]byte{'j'},
|
||||
[32]byte{'b'},
|
||||
[32]byte{'B'},
|
||||
12,
|
||||
[][32]byte{{'j'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'c'},
|
||||
[32]byte{'b'},
|
||||
[32]byte{'B'},
|
||||
4,
|
||||
[][32]byte{{'f'}, {'e'}, {'i'}, {'h'}, {'l'},
|
||||
{'k'}, {'g'}, {'d'}, {'c'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'H'},
|
||||
12,
|
||||
[][32]byte{{'i'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'h'},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'G'},
|
||||
11,
|
||||
[][32]byte{{'i'}, {'h'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
[32]byte{'d'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'f'},
|
||||
[32]byte{'e'},
|
||||
[32]byte{'D'},
|
||||
11,
|
||||
[][32]byte{{'f'}, {'e'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'h'},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'C'},
|
||||
5,
|
||||
[][32]byte{
|
||||
{'f'},
|
||||
{'e'},
|
||||
{'i'},
|
||||
@@ -61,118 +119,106 @@ func TestPruneInvalid(t *testing.T) {
|
||||
{'g'},
|
||||
{'d'},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted i",
|
||||
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'H'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'d'},
|
||||
[32]byte{'E'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted i and h",
|
||||
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'G'},
|
||||
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'i'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'j'},
|
||||
[32]byte{'B'},
|
||||
12,
|
||||
[][32]byte{{'j'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted i--g",
|
||||
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'j'},
|
||||
[32]byte{'J'},
|
||||
13,
|
||||
[][32]byte{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted 9",
|
||||
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
[32]byte{'j'},
|
||||
[32]byte{'a'},
|
||||
[32]byte{'B'},
|
||||
0,
|
||||
[][32]byte{},
|
||||
errInvalidParentRoot,
|
||||
},
|
||||
{
|
||||
name: "wanted f and e",
|
||||
root: [32]byte{'f'}, parentRoot: [32]byte{'e'}, parentHash: [32]byte{'E'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'f'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted 6",
|
||||
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'C'},
|
||||
wantedNodeNumber: 6,
|
||||
wantedRoots: [][32]byte{
|
||||
{'f'}, {'e'}, {'i'}, {'h'}, {'l'}, {'k'}, {'g'},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "wanted 9 again",
|
||||
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'E'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
},
|
||||
{
|
||||
name: "wanted 13",
|
||||
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'B'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
},
|
||||
{
|
||||
name: "wanted empty",
|
||||
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'J'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
},
|
||||
{
|
||||
name: "errInvalidParentRoot",
|
||||
root: [32]byte{'j'}, parentRoot: [32]byte{'a'}, parentHash: [32]byte{'A'}, lastValidHash: [32]byte{'B'},
|
||||
wantedErr: errInvalidParentRoot,
|
||||
},
|
||||
{
|
||||
name: "root z",
|
||||
root: [32]byte{'z'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{}))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
|
||||
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.parentHash, tc.lastValidHash)
|
||||
if tc.wantedErr == nil {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(tc.wantedRoots), len(roots))
|
||||
require.DeepEqual(t, tc.wantedRoots, roots)
|
||||
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
|
||||
} else {
|
||||
require.ErrorIs(t, tc.wantedErr, err)
|
||||
}
|
||||
})
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.payload)
|
||||
if tc.wantedErr == nil {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tc.wantedRoots, roots)
|
||||
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
|
||||
} else {
|
||||
require.ErrorIs(t, tc.wantedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -194,40 +240,11 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
|
||||
f.store.previousProposerBoostScore = 10
|
||||
f.store.previousProposerBoostRoot = [32]byte{'b'}
|
||||
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'B'}, [32]byte{'A'})
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
// proposer boost is still applied to c
|
||||
require.Equal(t, uint64(10), f.store.previousProposerBoostScore)
|
||||
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.Equal(t, [32]byte{'b'}, f.store.previousProposerBoostRoot)
|
||||
}
|
||||
|
||||
func TestSetOptimisticToInvalid_ProposerBoost_Older(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
f.store.proposerBoostRoot = [32]byte{'d'}
|
||||
f.store.previousProposerBoostScore = 10
|
||||
f.store.previousProposerBoostRoot = [32]byte{'c'}
|
||||
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'C'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
// proposer boost is still applied to c
|
||||
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
|
||||
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.Equal(t, [32]byte{}, f.store.previousProposerBoostRoot)
|
||||
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
|
||||
}
|
||||
|
||||
// This is a regression test (10565)
|
||||
@@ -255,9 +272,10 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{'A'})
|
||||
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot[[32]byte{'a'}].children))
|
||||
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
|
||||
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
@@ -304,13 +322,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(roots))
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
@@ -357,13 +375,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(roots))
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
func TestSetOptimisticToValid(t *testing.T) {
|
||||
|
||||
@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
s := f.store
|
||||
proposerScore := uint64(0)
|
||||
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
|
||||
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
|
||||
} else {
|
||||
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
}
|
||||
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
|
||||
} else {
|
||||
|
||||
@@ -166,14 +166,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// (1: 48) -> (2: 38) -> (3: 10)
|
||||
// \--------------->(4: 18)
|
||||
//
|
||||
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
|
||||
require.Equal(t, node1.node.weight, uint64(48))
|
||||
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
|
||||
require.Equal(t, node2.node.weight, uint64(38))
|
||||
node3 := f.store.emptyNodeByRoot[indexToHash(3)]
|
||||
require.Equal(t, node3.node.weight, uint64(10))
|
||||
node4 := f.store.emptyNodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node4.node.weight, uint64(18))
|
||||
node1 := f.store.nodeByRoot[indexToHash(1)]
|
||||
require.Equal(t, node1.weight, uint64(48))
|
||||
node2 := f.store.nodeByRoot[indexToHash(2)]
|
||||
require.Equal(t, node2.weight, uint64(38))
|
||||
node3 := f.store.nodeByRoot[indexToHash(3)]
|
||||
require.Equal(t, node3.weight, uint64(10))
|
||||
node4 := f.store.nodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node4.weight, uint64(18))
|
||||
|
||||
// Regression: process attestations for C, check that it
|
||||
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
|
||||
|
||||
@@ -34,23 +34,22 @@ const orphanLateBlockProposingEarly = 2
|
||||
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
override = false
|
||||
|
||||
// We only need to override FCU if our current consensusHead is from the current
|
||||
// We only need to override FCU if our current head is from the current
|
||||
// slot. This differs from the spec implementation in that we assume
|
||||
// that we will call this function in the slot before proposing.
|
||||
consensusHead := f.store.headNode
|
||||
if consensusHead == nil {
|
||||
head := f.store.headNode
|
||||
if head == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
if head.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
return
|
||||
}
|
||||
|
||||
// Do not reorg on epoch boundaries
|
||||
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return
|
||||
}
|
||||
head := f.store.choosePayloadContent(consensusHead)
|
||||
// Only reorg blocks that arrive late
|
||||
early, err := head.arrivedEarly(f.store.genesisTime)
|
||||
if err != nil {
|
||||
@@ -62,15 +61,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
}
|
||||
// Only reorg if we have been finalizing
|
||||
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
|
||||
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return
|
||||
}
|
||||
// Only orphan a single block
|
||||
parent := consensusHead.parent
|
||||
parent := head.parent
|
||||
if parent == nil {
|
||||
return
|
||||
}
|
||||
if consensusHead.slot > parent.node.slot+1 {
|
||||
if head.slot > parent.slot+1 {
|
||||
return
|
||||
}
|
||||
// Do not orphan a block that has higher justification than the parent
|
||||
@@ -79,12 +78,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
// }
|
||||
|
||||
// Only orphan a block if the head LMD vote is weak
|
||||
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
return
|
||||
}
|
||||
|
||||
// Return early if we are checking before 10 seconds into the slot
|
||||
sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
|
||||
sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check current slot")
|
||||
return true
|
||||
@@ -93,7 +92,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
return true
|
||||
}
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return
|
||||
}
|
||||
return true
|
||||
@@ -107,61 +106,60 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
// This function needs to be called only when proposing a block and all
|
||||
// attestation processing has already happened.
|
||||
func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
consensusHead := f.store.headNode
|
||||
if consensusHead == nil {
|
||||
head := f.store.headNode
|
||||
if head == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
// Only reorg blocks from the previous slot.
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
if consensusHead.slot+1 != currentSlot {
|
||||
return consensusHead.root
|
||||
if head.slot+1 != currentSlot {
|
||||
return head.root
|
||||
}
|
||||
// Do not reorg on epoch boundaries
|
||||
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return consensusHead.root
|
||||
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return head.root
|
||||
}
|
||||
// Only reorg blocks that arrive late
|
||||
head := f.store.choosePayloadContent(consensusHead)
|
||||
early, err := head.arrivedEarly(f.store.genesisTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if block arrived early")
|
||||
return consensusHead.root
|
||||
return head.root
|
||||
}
|
||||
if early {
|
||||
return consensusHead.root
|
||||
return head.root
|
||||
}
|
||||
// Only reorg if we have been finalizing
|
||||
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
|
||||
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return consensusHead.root
|
||||
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return head.root
|
||||
}
|
||||
// Only orphan a single block
|
||||
parent := consensusHead.parent
|
||||
parent := head.parent
|
||||
if parent == nil {
|
||||
return consensusHead.root
|
||||
return head.root
|
||||
}
|
||||
if consensusHead.slot > parent.node.slot+1 {
|
||||
return consensusHead.root
|
||||
if head.slot > parent.slot+1 {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only orphan a block if the head LMD vote is weak
|
||||
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
return consensusHead.root
|
||||
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return consensusHead.root
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only reorg if we are proposing early
|
||||
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if proposing early")
|
||||
return consensusHead.root
|
||||
return head.root
|
||||
}
|
||||
if sss >= orphanLateBlockProposingEarly*time.Second {
|
||||
return consensusHead.root
|
||||
return head.root
|
||||
}
|
||||
return parent.node.root
|
||||
return parent.root
|
||||
}
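
For reference, the two weight comparisons in ShouldOverrideFCU and GetProposerHead reduce to percentage checks against the per-slot committee weight. A minimal sketch, with illustrative constants standing in for params.BeaconConfig().ReorgHeadWeightThreshold and ReorgParentWeightThreshold (the real values come from the beacon config, not from this snippet):

package main

import "fmt"

// Illustrative thresholds, expressed as percentages of committee weight.
const (
	reorgHeadWeightThreshold   = 20  // head may hold at most this share to be orphaned
	reorgParentWeightThreshold = 160 // parent must hold at least this share to build on
)

// lateBlockReorgable mirrors the weight checks above: a late head is only
// orphaned when its own LMD weight is weak and its parent's weight is strong,
// both measured against the per-slot committee weight.
func lateBlockReorgable(headWeight, parentWeight, committeeWeight uint64) bool {
	if headWeight*100 > committeeWeight*reorgHeadWeightThreshold {
		return false // head vote is too strong to orphan
	}
	if parentWeight*100 < committeeWeight*reorgParentWeightThreshold {
		return false // parent vote is not strong enough to build on
	}
	return true
}

func main() {
	// committeeWeight is the total active balance divided by slots per epoch.
	fmt.Println(lateBlockReorgable(5, 180, 100))  // true: weak head, strong parent
	fmt.Println(lateBlockReorgable(30, 180, 100)) // false: head is too strong
}
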
@@ -38,6 +38,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
require.Equal(t, blk.Root(), headRoot)
|
||||
t.Run("head is weak", func(t *testing.T) {
|
||||
require.Equal(t, true, f.ShouldOverrideFCU())
|
||||
|
||||
})
|
||||
t.Run("head is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode
|
||||
@@ -59,11 +60,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
f.store.headNode.slot = saved
|
||||
})
|
||||
t.Run("head is early", func(t *testing.T) {
|
||||
fn := f.store.fullNodeByRoot[f.store.headNode.root]
|
||||
saved := fn.timestamp
|
||||
fn.timestamp = saved.Add(-2 * time.Second)
|
||||
saved := f.store.headNode.timestamp
|
||||
f.store.headNode.timestamp = saved.Add(-2 * time.Second)
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
fn.timestamp = saved
|
||||
f.store.headNode.timestamp = saved
|
||||
})
|
||||
t.Run("chain not finalizing", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
@@ -74,10 +74,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
})
|
||||
t.Run("Not single block reorg", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.slot
|
||||
f.store.headNode.parent.node.slot = 0
|
||||
saved := f.store.headNode.parent.slot
|
||||
f.store.headNode.parent.slot = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.node.slot = saved
|
||||
f.store.headNode.parent.slot = saved
|
||||
})
|
||||
t.Run("parent is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent
|
||||
@@ -86,17 +86,17 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
f.store.headNode.parent = saved
|
||||
})
|
||||
t.Run("parent is weak early call", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.weight
|
||||
f.store.headNode.parent.node.weight = 0
|
||||
saved := f.store.headNode.parent.weight
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, true, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.node.weight = saved
|
||||
f.store.headNode.parent.weight = saved
|
||||
})
|
||||
t.Run("parent is weak late call", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.weight
|
||||
saved := f.store.headNode.parent.weight
|
||||
driftGenesisTime(f, 2, 11*time.Second)
|
||||
f.store.headNode.parent.node.weight = 0
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.node.weight = saved
|
||||
f.store.headNode.parent.weight = saved
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
})
|
||||
t.Run("Head is strong", func(t *testing.T) {
|
||||
@@ -135,8 +135,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blk.Root(), headRoot)
|
||||
orphanLateBlockFirstThreshold := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
|
||||
fn := f.store.fullNodeByRoot[f.store.headNode.root]
|
||||
fn.timestamp = fn.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
|
||||
f.store.headNode.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
|
||||
t.Run("head is weak", func(t *testing.T) {
|
||||
require.Equal(t, parentRoot, f.GetProposerHead())
|
||||
})
|
||||
@@ -160,12 +159,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
f.store.headNode.slot = saved
|
||||
})
|
||||
t.Run("head is early", func(t *testing.T) {
|
||||
fn := f.store.fullNodeByRoot[f.store.headNode.root]
|
||||
saved := fn.timestamp
|
||||
saved := f.store.headNode.timestamp
|
||||
headTimeStamp := f.store.genesisTime.Add(time.Duration(uint64(f.store.headNode.slot)*params.BeaconConfig().SecondsPerSlot+1) * time.Second)
|
||||
fn.timestamp = headTimeStamp
|
||||
f.store.headNode.timestamp = headTimeStamp
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
fn.timestamp = saved
|
||||
f.store.headNode.timestamp = saved
|
||||
})
|
||||
t.Run("chain not finalizing", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
@@ -176,10 +174,10 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
driftGenesisTime(f, 3, 1*time.Second)
|
||||
})
|
||||
t.Run("Not single block reorg", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.slot
|
||||
f.store.headNode.parent.node.slot = 0
|
||||
saved := f.store.headNode.parent.slot
|
||||
f.store.headNode.parent.slot = 0
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
f.store.headNode.parent.node.slot = saved
|
||||
f.store.headNode.parent.slot = saved
|
||||
})
|
||||
t.Run("parent is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// head starts from justified root and then follows the best descendant links
|
||||
@@ -27,16 +26,13 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// JustifiedRoot has to be known
|
||||
var jn *Node
|
||||
ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if ej != nil {
|
||||
jn = ej.node
|
||||
} else {
|
||||
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if !ok || justifiedNode == nil {
|
||||
// If the justifiedCheckpoint is from genesis, then the root is
|
||||
// zeroHash. In this case it should be the root of forkchoice
|
||||
// tree.
|
||||
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
jn = s.treeRootNode
|
||||
justifiedNode = s.treeRootNode
|
||||
} else {
|
||||
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
|
||||
}
|
||||
@@ -44,9 +40,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
|
||||
// If the justified node doesn't have a best descendant,
|
||||
// the best node is itself.
|
||||
bestDescendant := jn.bestDescendant
|
||||
bestDescendant := justifiedNode.bestDescendant
|
||||
if bestDescendant == nil {
|
||||
bestDescendant = jn
|
||||
bestDescendant = justifiedNode
|
||||
}
|
||||
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
|
||||
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
|
||||
@@ -70,42 +66,29 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
// It then updates the new node's parent with the best child and descendant node.
|
||||
func (s *Store) insert(ctx context.Context,
|
||||
roblock consensus_blocks.ROBlock,
|
||||
justifiedEpoch, finalizedEpoch primitives.Epoch,
|
||||
) (*PayloadNode, error) {
|
||||
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
defer span.End()
|
||||
|
||||
root := roblock.Root()
|
||||
block := roblock.Block()
|
||||
slot := block.Slot()
|
||||
parentRoot := block.ParentRoot()
|
||||
var payloadHash [32]byte
|
||||
if block.Version() >= version.Bellatrix {
|
||||
execution, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(payloadHash[:], execution.BlockHash())
|
||||
}
|
||||
|
||||
// Return if the block has been inserted into Store before.
|
||||
if n, ok := s.emptyNodeByRoot[root]; ok {
|
||||
if n, ok := s.nodeByRoot[root]; ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
block := roblock.Block()
|
||||
slot := block.Slot()
|
||||
var parent *PayloadNode
|
||||
blockHash := &[32]byte{}
|
||||
if block.Version() >= version.Gloas {
|
||||
if err := s.resolveParentPayloadStatus(block, &parent, blockHash); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if block.Version() >= version.Bellatrix {
|
||||
execution, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(blockHash[:], execution.BlockHash())
|
||||
}
|
||||
parentRoot := block.ParentRoot()
|
||||
en := s.emptyNodeByRoot[parentRoot]
|
||||
parent = s.fullNodeByRoot[parentRoot]
|
||||
if parent == nil && en != nil {
|
||||
// pre-Gloas only full parents are allowed.
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
|
||||
parent := s.nodeByRoot[parentRoot]
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
@@ -114,52 +97,30 @@ func (s *Store) insert(ctx context.Context,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
blockHash: *blockHash,
|
||||
optimistic: true,
|
||||
payloadHash: payloadHash,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Set the node's target checkpoint
|
||||
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
n.target = n
|
||||
} else if parent != nil {
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
|
||||
n.target = parent.node.target
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
|
||||
n.target = parent.target
|
||||
} else {
|
||||
n.target = parent.node
|
||||
n.target = parent
|
||||
}
|
||||
}
|
||||
var ret *PayloadNode
|
||||
optimistic := true
|
||||
if parent != nil {
|
||||
optimistic = n.parent.optimistic
|
||||
}
|
||||
// Make the empty node. Its optimistic status equals its parent's status.
|
||||
pn := &PayloadNode{
|
||||
node: n,
|
||||
optimistic: optimistic,
|
||||
timestamp: time.Now(),
|
||||
children: make([]*Node, 0),
|
||||
}
|
||||
s.emptyNodeByRoot[root] = pn
|
||||
ret = pn
|
||||
if block.Version() < version.Gloas {
|
||||
// Also make the full node; it is optimistic until the engine validates the execution payload.
|
||||
fn := &PayloadNode{
|
||||
node: n,
|
||||
optimistic: true,
|
||||
timestamp: time.Now(),
|
||||
full: true,
|
||||
}
|
||||
ret = fn
|
||||
s.fullNodeByRoot[root] = fn
|
||||
}
|
||||
|
||||
s.nodeByRoot[root] = n
|
||||
if parent == nil {
|
||||
if s.treeRootNode == nil {
|
||||
s.treeRootNode = n
|
||||
s.headNode = n
|
||||
s.highestReceivedNode = n
|
||||
} else {
|
||||
delete(s.emptyNodeByRoot, root)
|
||||
delete(s.fullNodeByRoot, root)
|
||||
delete(s.nodeByRoot, root)
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
} else {
|
||||
@@ -167,7 +128,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Apply proposer boost
|
||||
now := time.Now()
|
||||
if now.Before(s.genesisTime) {
|
||||
return ret, nil
|
||||
return n, nil
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
|
||||
@@ -183,16 +144,17 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Update best descendants
|
||||
jEpoch := s.justifiedCheckpoint.Epoch
|
||||
fEpoch := s.finalizedCheckpoint.Epoch
|
||||
if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": root,
|
||||
}).Error("Could not update best descendant")
|
||||
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
_, remErr := s.removeNode(ctx, n)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("could not remove node")
|
||||
}
|
||||
return nil, errors.Wrap(err, "could not update best descendants")
|
||||
}
|
||||
}
|
||||
// Update metrics.
|
||||
processedBlockCount.Inc()
|
||||
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
|
||||
nodeCount.Set(float64(len(s.nodeByRoot)))
|
||||
|
||||
// Only update received block slot if it's within epoch from current time.
|
||||
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
|
||||
@@ -203,10 +165,10 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.highestReceivedNode = n
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
return n, nil
|
||||
}
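
The target-checkpoint assignment inside insert() can be restated compactly: an epoch-boundary block is its own target, otherwise it inherits its parent's target when both sit in the same epoch and points at the parent itself when the parent is from an earlier epoch. A sketch under simplified types; slotsPerEpoch and the uint64 slots are illustrative stand-ins for the config value and primitives.Slot.

package main

import "fmt"

const slotsPerEpoch = 32

type nodeT struct {
	slot   uint64
	target *nodeT
}

// setTarget restates the target rule from insert() above.
func setTarget(n, parent *nodeT) {
	switch {
	case n.slot%slotsPerEpoch == 0:
		n.target = n // epoch-boundary block is its own target
	case parent == nil:
		// tree root without a parent: target stays unset
	case n.slot/slotsPerEpoch == parent.slot/slotsPerEpoch:
		n.target = parent.target // same epoch: inherit the parent's target
	default:
		n.target = parent // parent is older: the parent itself is the target
	}
}

func main() {
	boundary := &nodeT{slot: 32}
	setTarget(boundary, nil) // boundary becomes its own target
	child := &nodeT{slot: 33}
	setTarget(child, boundary)
	fmt.Println(child.target == boundary) // true: inherited through the parent
}
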
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` maps
|
||||
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
|
||||
// starting from `node` down to the finalized Node or to a leaf of the Fork
|
||||
// choice store.
|
||||
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
|
||||
@@ -219,51 +181,44 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, child := range s.allConsensusChildren(node) {
|
||||
for _, child := range node.children {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
en := s.emptyNodeByRoot[node.root]
|
||||
en.children = nil
|
||||
delete(s.emptyNodeByRoot, node.root)
|
||||
fn := s.fullNodeByRoot[node.root]
|
||||
if fn != nil {
|
||||
fn.children = nil
|
||||
delete(s.fullNodeByRoot, node.root)
|
||||
}
|
||||
|
||||
node.children = nil
|
||||
delete(s.nodeByRoot, node.root)
|
||||
return nil
|
||||
}
|
||||
|
||||
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
|
||||
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
|
||||
// TODO: GLOAS, to ensure that chains up to a full node are found, we may want to consider pruning only up to the latest full block that was finalized
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
defer span.End()
|
||||
|
||||
finalizedRoot := s.finalizedCheckpoint.Root
|
||||
finalizedEpoch := s.finalizedCheckpoint.Epoch
|
||||
fen, ok := s.emptyNodeByRoot[finalizedRoot]
|
||||
if !ok || fen == nil {
|
||||
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
|
||||
if !ok || finalizedNode == nil {
|
||||
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
|
||||
}
|
||||
fn := fen.node
|
||||
// return early if we haven't changed the finalized checkpoint
|
||||
if fn.parent == nil {
|
||||
if finalizedNode.parent == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save the new finalized dependent root because it will be pruned
|
||||
s.finalizedDependentRoot = fn.parent.node.root
|
||||
s.finalizedDependentRoot = finalizedNode.parent.root
|
||||
|
||||
// Prune nodeByRoot starting from root
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fn.parent = nil
|
||||
s.treeRootNode = fn
|
||||
finalizedNode.parent = nil
|
||||
s.treeRootNode = finalizedNode
|
||||
|
||||
prunedCount.Inc()
|
||||
// Prune all children of the finalized checkpoint block that are incompatible with it
|
||||
@@ -271,13 +226,13 @@ func (s *Store) prune(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute epoch start")
|
||||
}
|
||||
if fn.slot == checkpointMaxSlot {
|
||||
if finalizedNode.slot == checkpointMaxSlot {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, child := range fen.children {
|
||||
for _, child := range finalizedNode.children {
|
||||
if child != nil && child.slot <= checkpointMaxSlot {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
|
||||
return errors.Wrap(err, "could not prune incompatible finalized child")
|
||||
}
|
||||
}
|
||||
@@ -291,10 +246,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
|
||||
var roots [][32]byte
|
||||
var slots []primitives.Slot
|
||||
|
||||
for root, n := range s.emptyNodeByRoot {
|
||||
if len(s.allConsensusChildren(n.node)) == 0 {
|
||||
for root, node := range s.nodeByRoot {
|
||||
if len(node.children) == 0 {
|
||||
roots = append(roots, root)
|
||||
slots = append(slots, n.node.slot)
|
||||
slots = append(slots, node.slot)
|
||||
}
|
||||
}
|
||||
return roots, slots
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -40,18 +41,18 @@ func TestStore_NodeByRoot(t *testing.T) {
|
||||
state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
node0 := f.store.emptyNodeByRoot[params.BeaconConfig().ZeroHash]
|
||||
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
|
||||
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
|
||||
node0 := f.store.treeRootNode
|
||||
node1 := node0.children[0]
|
||||
node2 := node1.children[0]
|
||||
|
||||
expectedRoots := map[[32]byte]*PayloadNode{
|
||||
expectedRoots := map[[32]byte]*Node{
|
||||
params.BeaconConfig().ZeroHash: node0,
|
||||
indexToHash(1): node1,
|
||||
indexToHash(2): node2,
|
||||
}
|
||||
|
||||
require.Equal(t, 3, f.NodeCount())
|
||||
for root, node := range f.store.emptyNodeByRoot {
|
||||
for root, node := range f.store.nodeByRoot {
|
||||
v, ok := expectedRoots[root]
|
||||
require.Equal(t, ok, true)
|
||||
require.Equal(t, v, node)
|
||||
@@ -110,28 +111,37 @@ func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
require.Equal(t, h, indexToHash(4))
|
||||
}
|
||||
|
||||
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
f := setup(0, 0)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
cancel()
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
err = f.InsertNode(ctx, state, blkRoot)
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
}
|
||||
|
||||
func TestStore_Insert(t *testing.T) {
|
||||
// The new node does not have a parent.
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
emptyRootPN := &PayloadNode{node: treeRootNode}
|
||||
fullRootPN := &PayloadNode{node: treeRootNode, full: true, optimistic: true}
|
||||
emptyNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): emptyRootPN}
|
||||
fullNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): fullRootPN}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
s := &Store{emptyNodeByRoot: emptyNodeByRoot, fullNodeByRoot: fullNodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
payloadHash := [32]byte{'a'}
|
||||
ctx := t.Context()
|
||||
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
_, err = s.insert(ctx, blk, 1, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*PayloadNode)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
children := s.allConsensusChildren(treeRootNode)
|
||||
assert.Equal(t, 1, len(children), "Incorrect children number")
|
||||
assert.Equal(t, payloadHash, children[0].blockHash, "Incorrect payload hash")
|
||||
child := children[0]
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
|
||||
assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
|
||||
child := treeRootNode.children[0]
|
||||
assert.Equal(t, primitives.Epoch(1), child.justifiedEpoch, "Incorrect justification")
|
||||
assert.Equal(t, primitives.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
|
||||
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
|
||||
@@ -156,7 +166,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
// Finalized root is at index 99 so everything before 99 should be pruned.
|
||||
s.finalizedCheckpoint.Root = indexToHash(99)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
}
|
||||
|
||||
func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
@@ -178,12 +188,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
// Finalized root is at index 11 so everything before 11 should be pruned.
|
||||
s.finalizedCheckpoint.Root = indexToHash(10)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
|
||||
// One more time.
|
||||
s.finalizedCheckpoint.Root = indexToHash(20)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
}
|
||||
|
||||
func TestStore_Prune_ReturnEarly(t *testing.T) {
|
||||
@@ -226,7 +236,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.emptyNodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
@@ -306,7 +316,7 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.emptyNodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
}
|
||||
|
||||
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
|
||||
@@ -21,26 +21,23 @@ type ForkChoice struct {
|
||||
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
|
||||
}
|
||||
|
||||
var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)
|
||||
|
||||
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
|
||||
type Store struct {
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
|
||||
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
|
||||
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by roots.
|
||||
fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // full nodes (the payload was present) indexed by beacon block root.
|
||||
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
|
||||
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
|
||||
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
genesisTime time.Time
|
||||
highestReceivedNode *Node // The highest slot node.
|
||||
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
|
||||
@@ -52,28 +49,19 @@ type Store struct {
|
||||
type Node struct {
|
||||
slot primitives.Slot // slot of the block converted to the node.
|
||||
root [fieldparams.RootLength]byte // root of the block converted to the node.
|
||||
blockHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *PayloadNode // parent index of this node.
|
||||
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
|
||||
parent *Node // parent index of this node.
|
||||
target *Node // target checkpoint for
|
||||
bestDescendant *Node // bestDescendant node of this node.
|
||||
children []*Node // the list of direct children of this Node
|
||||
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
|
||||
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
|
||||
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
|
||||
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
|
||||
balance uint64 // the balance that voted for this node directly
|
||||
weight uint64 // weight of this node: the total balance including children
|
||||
}

// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty or full.
type PayloadNode struct {
	optimistic bool // whether the block has been fully validated or not
	full bool // whether this node represents a payload present or not
	weight uint64 // weight of this node: the total balance including children
	balance uint64 // the balance that voted for this node directly
	bestDescendant *Node // bestDescendant node of this payload node.
	node *Node // the consensus part of this full forkchoice node
	timestamp time.Time // The timestamp when the node was inserted.
	children []*Node // the list of direct children of this Node
	bestDescendant *Node // bestDescendant node of this node.
	optimistic bool // whether the block has been fully validated or not
	timestamp time.Time // The timestamp when the node was inserted.
}

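With this split, block-level data lives in Node and the payload status lives in PayloadNode; the two alternate up the tree through Node.parent and PayloadNode.node. The following is a minimal sketch of how such an alternating structure could be walked, using trimmed stand-in types rather than Prysm's actual forkchoice API:

package main

import "fmt"

// Trimmed stand-ins for Node and PayloadNode above, used only to illustrate the
// alternating block/payload structure; field sets and names are not Prysm's API.
type blockNode struct {
	parent *payloadNode
	slot   uint64
}

type payloadNode struct {
	node *blockNode // consensus part of this forkchoice node
	full bool       // payload present (true) or empty (false)
}

// lastFullAncestor walks the chain upward and returns the most recent ancestor
// whose payload was actually revealed, skipping empty payload nodes.
func lastFullAncestor(n *blockNode) *payloadNode {
	for p := n.parent; p != nil; p = p.node.parent {
		if p.full {
			return p
		}
	}
	return nil
}

func main() {
	grandparent := &payloadNode{full: true, node: &blockNode{slot: 8}}
	parent := &payloadNode{full: false, node: &blockNode{slot: 9, parent: grandparent}}
	child := &blockNode{slot: 10, parent: parent}
	fmt.Println(lastFullAncestor(child).node.slot) // 8: the payload for slot 9 was withheld
}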
// Vote defines an individual validator's vote.
|
||||
|
||||
@@ -15,34 +15,33 @@ import (
|
||||
)
|
||||
|
||||
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
|
||||
en, ok := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
|
||||
}
|
||||
if epoch < en.node.unrealizedJustifiedEpoch {
|
||||
if epoch < node.unrealizedJustifiedEpoch {
|
||||
return errInvalidUnrealizedJustifiedEpoch
|
||||
}
|
||||
en.node.unrealizedJustifiedEpoch = epoch
|
||||
node.unrealizedJustifiedEpoch = epoch
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
|
||||
en, ok := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
|
||||
}
|
||||
if epoch < en.node.unrealizedFinalizedEpoch {
|
||||
if epoch < node.unrealizedFinalizedEpoch {
|
||||
return errInvalidUnrealizedFinalizedEpoch
|
||||
}
|
||||
en.node.unrealizedFinalizedEpoch = epoch
|
||||
node.unrealizedFinalizedEpoch = epoch
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
|
||||
// epochs stored within nodes. It should be called at the beginning of each epoch.
|
||||
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
|
||||
for _, en := range f.store.emptyNodeByRoot {
|
||||
node := en.node
|
||||
for _, node := range f.store.nodeByRoot {
|
||||
node.justifiedEpoch = node.unrealizedJustifiedEpoch
|
||||
node.finalizedEpoch = node.unrealizedFinalizedEpoch
|
||||
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
|
||||
@@ -63,17 +62,16 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
|
||||
if node.parent == nil { // Nothing to do if the parent is nil.
|
||||
return jc, fc
|
||||
}
|
||||
pn := node.parent.node
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
|
||||
// Exit early if it's justified or too early to be justified.
|
||||
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
|
||||
node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
|
||||
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
|
||||
return jc, fc
|
||||
}
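The tooEarlyForCurr guard above holds while fewer than two thirds of the current epoch's slots have elapsed, in which case the parent's unrealized checkpoints are copied instead of being recomputed. A quick check of the cutoff, assuming mainnet's 32 slots per epoch:

package main

import "fmt"

func main() {
	// Mirrors the tooEarlyForCurr condition above; 32 is assumed to be
	// params.BeaconConfig().SlotsPerEpoch (mainnet).
	const slotsPerEpoch = 32
	for _, sinceEpochStart := range []uint64{0, 20, 21, 22, 31} {
		tooEarly := sinceEpochStart*3 < slotsPerEpoch*2
		fmt.Printf("slot %2d into the epoch -> tooEarlyForCurr=%v\n", sinceEpochStart, tooEarly)
	}
	// Output: true for slots 0 through 21 (before two thirds of the epoch), false from slot 22 on.
}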
|
||||
|
||||
|
||||
@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
|
||||
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
|
||||
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
|
||||
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
|
||||
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
|
||||
@@ -78,9 +78,9 @@ func TestStore_LongFork(t *testing.T) {
|
||||
// Add an attestation to c, it is head
|
||||
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
|
||||
f.justifiedBalances = []uint64{100}
|
||||
c := f.store.emptyNodeByRoot[[32]byte{'c'}]
|
||||
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.node.slot))
|
||||
driftGenesisTime(f, c.node.slot, 0)
|
||||
c := f.store.nodeByRoot[[32]byte{'c'}]
|
||||
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.slot))
|
||||
driftGenesisTime(f, c.slot, 0)
|
||||
headRoot, err := f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, headRoot)
|
||||
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
|
||||
d := f.store.emptyNodeByRoot[[32]byte{'d'}]
|
||||
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.node.slot))
|
||||
driftGenesisTime(f, d.node.slot, 0)
|
||||
require.Equal(t, true, d.node.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.node.slot)))
|
||||
d := f.store.nodeByRoot[[32]byte{'d'}]
|
||||
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.slot))
|
||||
driftGenesisTime(f, d.slot, 0)
|
||||
require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.slot)))
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, headRoot)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
}
|
||||
|
||||
// Epoch 1 Epoch 2 Epoch 3
|
||||
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'d'}, headRoot)
|
||||
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
|
||||
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
|
||||
driftGenesisTime(f, 99, 0)
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
|
||||
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'h'}, headRoot)
|
||||
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
|
||||
}
|
||||
|
||||
func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 66, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
})
|
||||
|
||||
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
|
||||
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 96, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
})
|
||||
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 127, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
t.Run("Block from previous Epoch", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 96, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 9 10
|
||||
f.store.finalizedCheckpoint.Root = indexToHash(5)
|
||||
require.NoError(t, f.store.prune(t.Context()))
|
||||
assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
|
||||
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
|
||||
// we pruned artificially the justified root.
|
||||
f.store.justifiedCheckpoint.Root = indexToHash(5)
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
)
|
||||
|
||||
@@ -24,7 +23,6 @@ type ForkChoicer interface {
|
||||
Unlock()
|
||||
HeadRetriever // to compute head.
|
||||
BlockProcessor // to track new block for fork choice.
|
||||
PayloadProcessor // to track new payloads for fork choice.
|
||||
AttestationProcessor // to track new attestation for fork choice.
|
||||
Getter // to retrieve fork choice information.
|
||||
Setter // to set fork choice information.
|
||||
@@ -49,11 +47,6 @@ type BlockProcessor interface {
|
||||
InsertChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
|
||||
}
|
||||
|
||||
// PayloadProcessor processes a payload envelope
|
||||
type PayloadProcessor interface {
|
||||
InsertPayload(context.Context, interfaces.ROExecutionPayloadEnvelope) error
|
||||
}
|
||||
|
||||
// AttestationProcessor processes the attestation that's used for accounting fork choice.
|
||||
type AttestationProcessor interface {
|
||||
ProcessAttestation(context.Context, []uint64, [32]byte, primitives.Epoch)
|
||||
@@ -96,7 +89,7 @@ type FastGetter interface {
|
||||
// Setter allows to set forkchoice information
|
||||
type Setter interface {
|
||||
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
|
||||
SetOptimisticToInvalid(context.Context, [32]byte, [32]byte, [32]byte, [32]byte) ([][32]byte, error)
|
||||
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
|
||||
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
|
||||
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
|
||||
SetGenesisTime(time.Time)
|
||||
|
||||
3
changelog/james-prysm_gloas-db-save-block.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Gloas DB save functions for the Gloas block, payload envelope, and blinded payload envelope.
|
||||
2
changelog/potuz_forkchoice_unused_highestblockdelay.md
Normal file
@@ -0,0 +1,2 @@
|
||||
### Ignored
|
||||
- Remove unused `HighestBlockDelay` method in forkchoice.
|
||||
@@ -1,2 +0,0 @@
|
||||
### Added
|
||||
- Adapted forkchoice to future Gloas compatible type.
|
||||
@@ -1,2 +0,0 @@
|
||||
### Added
|
||||
- Added an InsertPayload method to allow full node insertion after gloas.
|
||||
@@ -195,6 +195,7 @@ ssz_fulu_objs = [
|
||||
]
|
||||
|
||||
ssz_gloas_objs = [
|
||||
"BlindedExecutionPayloadEnvelope",
|
||||
"BuilderPendingPayment",
|
||||
"BuilderPendingWithdrawal",
|
||||
"DataColumnSidecarGloas",
|
||||
@@ -204,6 +205,7 @@ ssz_gloas_objs = [
|
||||
"PayloadAttestationMessage",
|
||||
"ExecutionPayloadBid",
|
||||
"SignedExecutionPayloadBid",
|
||||
"SignedBlindedExecutionPayloadEnvelope",
|
||||
"SignedExecutionPayloadEnvelope",
|
||||
"BeaconBlockGloas",
|
||||
"SignedBeaconBlockGloas",
|
||||
|
||||
@@ -192,6 +192,58 @@ func copyBeaconBlockBodyGloas(body *BeaconBlockBodyGloas) *BeaconBlockBodyGloas
|
||||
return copied
|
||||
}
|
||||
|
||||
// CopySignedExecutionPayloadEnvelope copies the provided signed execution payload envelope.
|
||||
func CopySignedExecutionPayloadEnvelope(env *SignedExecutionPayloadEnvelope) *SignedExecutionPayloadEnvelope {
|
||||
if env == nil {
|
||||
return nil
|
||||
}
|
||||
return &SignedExecutionPayloadEnvelope{
|
||||
Message: copyExecutionPayloadEnvelope(env.Message),
|
||||
Signature: bytesutil.SafeCopyBytes(env.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
// copyExecutionPayloadEnvelope copies the provided execution payload envelope.
|
||||
func copyExecutionPayloadEnvelope(env *ExecutionPayloadEnvelope) *ExecutionPayloadEnvelope {
|
||||
if env == nil {
|
||||
return nil
|
||||
}
|
||||
return &ExecutionPayloadEnvelope{
|
||||
Payload: env.Payload, // engine proto, not deep copied here
|
||||
ExecutionRequests: env.ExecutionRequests,
|
||||
BuilderIndex: env.BuilderIndex,
|
||||
BeaconBlockRoot: bytesutil.SafeCopyBytes(env.BeaconBlockRoot),
|
||||
Slot: env.Slot,
|
||||
StateRoot: bytesutil.SafeCopyBytes(env.StateRoot),
|
||||
}
|
||||
}
|
||||
|
||||
// CopySignedBlindedExecutionPayloadEnvelope copies the provided signed blinded execution payload envelope.
|
||||
func CopySignedBlindedExecutionPayloadEnvelope(env *SignedBlindedExecutionPayloadEnvelope) *SignedBlindedExecutionPayloadEnvelope {
|
||||
if env == nil {
|
||||
return nil
|
||||
}
|
||||
return &SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: copyBlindedExecutionPayloadEnvelope(env.Message),
|
||||
Signature: bytesutil.SafeCopyBytes(env.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
// copyBlindedExecutionPayloadEnvelope copies the provided blinded execution payload envelope.
|
||||
func copyBlindedExecutionPayloadEnvelope(env *BlindedExecutionPayloadEnvelope) *BlindedExecutionPayloadEnvelope {
|
||||
if env == nil {
|
||||
return nil
|
||||
}
|
||||
return &BlindedExecutionPayloadEnvelope{
|
||||
BlockHash: bytesutil.SafeCopyBytes(env.BlockHash),
|
||||
ExecutionRequests: env.ExecutionRequests,
|
||||
BuilderIndex: env.BuilderIndex,
|
||||
BeaconBlockRoot: bytesutil.SafeCopyBytes(env.BeaconBlockRoot),
|
||||
Slot: env.Slot,
|
||||
StateRoot: bytesutil.SafeCopyBytes(env.StateRoot),
|
||||
}
|
||||
}
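Note that these helpers copy byte slices with SafeCopyBytes but intentionally share the nested protobuf pointers (Payload, ExecutionRequests). The stand-alone sketch below shows the practical difference, with trimmed stand-in types in place of the generated ones:

package main

import "fmt"

// Stand-in types, trimmed to the two field kinds that matter for the copy
// helpers above: a shared protobuf pointer and a byte slice.
type requests struct{ Deposits int }

type envelope struct {
	ExecutionRequests *requests
	BeaconBlockRoot   []byte
}

func safeCopyBytes(b []byte) []byte {
	if b == nil {
		return nil
	}
	c := make([]byte, len(b))
	copy(c, b)
	return c
}

func copyEnvelope(e *envelope) *envelope {
	if e == nil {
		return nil
	}
	return &envelope{
		ExecutionRequests: e.ExecutionRequests,              // pointer is shared, as in the helpers above
		BeaconBlockRoot:   safeCopyBytes(e.BeaconBlockRoot), // bytes are duplicated
	}
}

func main() {
	orig := &envelope{ExecutionRequests: &requests{Deposits: 1}, BeaconBlockRoot: []byte{0x01}}
	cp := copyEnvelope(orig)
	cp.BeaconBlockRoot[0] = 0xff      // original root is untouched
	cp.ExecutionRequests.Deposits = 9 // change is visible through the original too
	fmt.Println(orig.BeaconBlockRoot[0], orig.ExecutionRequests.Deposits) // 1 9
}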
|
||||
|
||||
// CopyBuilderPendingPayment creates a deep copy of a builder pending payment.
|
||||
func CopyBuilderPendingPayment(original *BuilderPendingPayment) *BuilderPendingPayment {
|
||||
if original == nil {
|
||||
|
||||
388
proto/prysm/v1alpha1/gloas.pb.go
generated
@@ -1369,6 +1369,142 @@ func (x *SignedExecutionPayloadEnvelope) GetSignature() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
type BlindedExecutionPayloadEnvelope struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
BlockHash []byte `protobuf:"bytes,1,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty" ssz-size:"32"`
|
||||
ExecutionRequests *v1.ExecutionRequests `protobuf:"bytes,2,opt,name=execution_requests,json=executionRequests,proto3" json:"execution_requests,omitempty"`
|
||||
BuilderIndex github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex `protobuf:"varint,3,opt,name=builder_index,json=builderIndex,proto3" json:"builder_index,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.BuilderIndex"`
|
||||
BeaconBlockRoot []byte `protobuf:"bytes,4,opt,name=beacon_block_root,json=beaconBlockRoot,proto3" json:"beacon_block_root,omitempty" ssz-size:"32"`
|
||||
Slot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
|
||||
StateRoot []byte `protobuf:"bytes,6,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty" ssz-size:"32"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) Reset() {
|
||||
*x = BlindedExecutionPayloadEnvelope{}
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BlindedExecutionPayloadEnvelope) ProtoMessage() {}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[14]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BlindedExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
|
||||
func (*BlindedExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_gloas_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) GetBlockHash() []byte {
|
||||
if x != nil {
|
||||
return x.BlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) GetExecutionRequests() *v1.ExecutionRequests {
|
||||
if x != nil {
|
||||
return x.ExecutionRequests
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) GetBuilderIndex() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex {
|
||||
if x != nil {
|
||||
return x.BuilderIndex
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(0)
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) GetBeaconBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BeaconBlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) GetSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
|
||||
if x != nil {
|
||||
return x.Slot
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
func (x *BlindedExecutionPayloadEnvelope) GetStateRoot() []byte {
|
||||
if x != nil {
|
||||
return x.StateRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SignedBlindedExecutionPayloadEnvelope struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Message *BlindedExecutionPayloadEnvelope `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *SignedBlindedExecutionPayloadEnvelope) Reset() {
|
||||
*x = SignedBlindedExecutionPayloadEnvelope{}
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *SignedBlindedExecutionPayloadEnvelope) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SignedBlindedExecutionPayloadEnvelope) ProtoMessage() {}
|
||||
|
||||
func (x *SignedBlindedExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[15]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SignedBlindedExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
|
||||
func (*SignedBlindedExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_gloas_proto_rawDescGZIP(), []int{15}
|
||||
}
|
||||
|
||||
func (x *SignedBlindedExecutionPayloadEnvelope) GetMessage() *BlindedExecutionPayloadEnvelope {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SignedBlindedExecutionPayloadEnvelope) GetSignature() []byte {
|
||||
if x != nil {
|
||||
return x.Signature
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty" ssz-size:"48"`
|
||||
@@ -1383,7 +1519,7 @@ type Builder struct {
|
||||
|
||||
func (x *Builder) Reset() {
|
||||
*x = Builder{}
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[14]
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[16]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1395,7 +1531,7 @@ func (x *Builder) String() string {
|
||||
func (*Builder) ProtoMessage() {}
|
||||
|
||||
func (x *Builder) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[14]
|
||||
mi := &file_proto_prysm_v1alpha1_gloas_proto_msgTypes[16]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1408,7 +1544,7 @@ func (x *Builder) ProtoReflect() protoreflect.Message {
|
||||
|
||||
// Deprecated: Use Builder.ProtoReflect.Descriptor instead.
|
||||
func (*Builder) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_gloas_proto_rawDescGZIP(), []int{14}
|
||||
return file_proto_prysm_v1alpha1_gloas_proto_rawDescGZIP(), []int{16}
|
||||
}
|
||||
|
||||
func (x *Builder) GetPubkey() []byte {
|
||||
@@ -2011,40 +2147,78 @@ var file_proto_prysm_v1alpha1_gloas_proto_rawDesc = []byte{
|
||||
0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xc1, 0x03, 0x0a, 0x07, 0x42, 0x75, 0x69,
|
||||
0x6c, 0x64, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x06, 0x70, 0x75,
|
||||
0x62, 0x6b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x07, 0x76, 0x65,
|
||||
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5e, 0x0a, 0x07, 0x62, 0x61,
|
||||
0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18,
|
||||
0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63,
|
||||
0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76,
|
||||
0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65,
|
||||
0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x47, 0x77, 0x65,
|
||||
0x69, 0x52, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x6a, 0x0a, 0x0d, 0x64, 0x65,
|
||||
0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||
0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
|
||||
0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70,
|
||||
0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75,
|
||||
0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
|
||||
0x65, 0x73, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69,
|
||||
0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x74, 0x0a, 0x12, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72,
|
||||
0x61, 0x77, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01,
|
||||
0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73,
|
||||
0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69,
|
||||
0x76, 0x65, 0x73, 0x2e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x11, 0x77, 0x69, 0x74, 0x68, 0x64,
|
||||
0x72, 0x61, 0x77, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x3b, 0x5a, 0x39,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
|
||||
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xc6, 0x03, 0x0a, 0x1f, 0x42, 0x6c, 0x69,
|
||||
0x6e, 0x64, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79,
|
||||
0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0a,
|
||||
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
|
||||
0x61, 0x73, 0x68, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x71, 0x0a, 0x0d, 0x62, 0x75, 0x69,
|
||||
0x6c, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
|
||||
0x42, 0x4c, 0x82, 0xb5, 0x18, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73,
|
||||
0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65,
|
||||
0x73, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0c,
|
||||
0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x32, 0x0a, 0x11,
|
||||
0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f,
|
||||
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52,
|
||||
0x0f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74,
|
||||
0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44,
|
||||
0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
|
||||
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
|
||||
0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
|
||||
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
|
||||
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x73, 0x74,
|
||||
0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f,
|
||||
0x74, 0x22, 0x9f, 0x01, 0x0a, 0x25, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c,
|
||||
0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x50, 0x0a, 0x07, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c,
|
||||
0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63,
|
||||
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65,
|
||||
0x6c, 0x6f, 0x70, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a,
|
||||
0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
|
||||
0x75, 0x72, 0x65, 0x22, 0xc1, 0x03, 0x0a, 0x07, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12,
|
||||
0x1e, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42,
|
||||
0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x1f, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x42, 0x05, 0x8a, 0xb5, 0x18, 0x01, 0x31, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x33, 0x0a, 0x11, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x64,
|
||||
0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18,
|
||||
0x02, 0x32, 0x30, 0x52, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64,
|
||||
0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5e, 0x0a, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68,
|
||||
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c,
|
||||
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69,
|
||||
0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x47, 0x77, 0x65, 0x69, 0x52, 0x07, 0x62, 0x61,
|
||||
0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x6a, 0x0a, 0x0d, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74,
|
||||
0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5,
|
||||
0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66,
|
||||
0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f,
|
||||
0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70,
|
||||
0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x70,
|
||||
0x6f, 0x63, 0x68, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x45, 0x70, 0x6f, 0x63,
|
||||
0x68, 0x12, 0x74, 0x0a, 0x12, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82,
|
||||
0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66,
|
||||
0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
|
||||
0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45,
|
||||
0x70, 0x6f, 0x63, 0x68, 0x52, 0x11, 0x77, 0x69, 0x74, 0x68, 0x64, 0x72, 0x61, 0x77, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
|
||||
0x3b, 0x65, 0x74, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -2059,88 +2233,92 @@ func file_proto_prysm_v1alpha1_gloas_proto_rawDescGZIP() []byte {
|
||||
return file_proto_prysm_v1alpha1_gloas_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_gloas_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
|
||||
var file_proto_prysm_v1alpha1_gloas_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
|
||||
var file_proto_prysm_v1alpha1_gloas_proto_goTypes = []any{
|
||||
(*ExecutionPayloadBid)(nil), // 0: ethereum.eth.v1alpha1.ExecutionPayloadBid
|
||||
(*SignedExecutionPayloadBid)(nil), // 1: ethereum.eth.v1alpha1.SignedExecutionPayloadBid
|
||||
(*PayloadAttestationData)(nil), // 2: ethereum.eth.v1alpha1.PayloadAttestationData
|
||||
(*PayloadAttestation)(nil), // 3: ethereum.eth.v1alpha1.PayloadAttestation
|
||||
(*PayloadAttestationMessage)(nil), // 4: ethereum.eth.v1alpha1.PayloadAttestationMessage
|
||||
(*BeaconBlockGloas)(nil), // 5: ethereum.eth.v1alpha1.BeaconBlockGloas
|
||||
(*BeaconBlockBodyGloas)(nil), // 6: ethereum.eth.v1alpha1.BeaconBlockBodyGloas
|
||||
(*SignedBeaconBlockGloas)(nil), // 7: ethereum.eth.v1alpha1.SignedBeaconBlockGloas
|
||||
(*BeaconStateGloas)(nil), // 8: ethereum.eth.v1alpha1.BeaconStateGloas
|
||||
(*BuilderPendingPayment)(nil), // 9: ethereum.eth.v1alpha1.BuilderPendingPayment
|
||||
(*BuilderPendingWithdrawal)(nil), // 10: ethereum.eth.v1alpha1.BuilderPendingWithdrawal
|
||||
(*DataColumnSidecarGloas)(nil), // 11: ethereum.eth.v1alpha1.DataColumnSidecarGloas
|
||||
(*ExecutionPayloadEnvelope)(nil), // 12: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope
|
||||
(*SignedExecutionPayloadEnvelope)(nil), // 13: ethereum.eth.v1alpha1.SignedExecutionPayloadEnvelope
|
||||
(*Builder)(nil), // 14: ethereum.eth.v1alpha1.Builder
|
||||
(*Eth1Data)(nil), // 15: ethereum.eth.v1alpha1.Eth1Data
|
||||
(*ProposerSlashing)(nil), // 16: ethereum.eth.v1alpha1.ProposerSlashing
|
||||
(*AttesterSlashingElectra)(nil), // 17: ethereum.eth.v1alpha1.AttesterSlashingElectra
|
||||
(*AttestationElectra)(nil), // 18: ethereum.eth.v1alpha1.AttestationElectra
|
||||
(*Deposit)(nil), // 19: ethereum.eth.v1alpha1.Deposit
|
||||
(*SignedVoluntaryExit)(nil), // 20: ethereum.eth.v1alpha1.SignedVoluntaryExit
|
||||
(*SyncAggregate)(nil), // 21: ethereum.eth.v1alpha1.SyncAggregate
|
||||
(*SignedBLSToExecutionChange)(nil), // 22: ethereum.eth.v1alpha1.SignedBLSToExecutionChange
|
||||
(*Fork)(nil), // 23: ethereum.eth.v1alpha1.Fork
|
||||
(*BeaconBlockHeader)(nil), // 24: ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
(*Validator)(nil), // 25: ethereum.eth.v1alpha1.Validator
|
||||
(*Checkpoint)(nil), // 26: ethereum.eth.v1alpha1.Checkpoint
|
||||
(*SyncCommittee)(nil), // 27: ethereum.eth.v1alpha1.SyncCommittee
|
||||
(*HistoricalSummary)(nil), // 28: ethereum.eth.v1alpha1.HistoricalSummary
|
||||
(*PendingDeposit)(nil), // 29: ethereum.eth.v1alpha1.PendingDeposit
|
||||
(*PendingPartialWithdrawal)(nil), // 30: ethereum.eth.v1alpha1.PendingPartialWithdrawal
|
||||
(*PendingConsolidation)(nil), // 31: ethereum.eth.v1alpha1.PendingConsolidation
|
||||
(*v1.Withdrawal)(nil), // 32: ethereum.engine.v1.Withdrawal
|
||||
(*v1.ExecutionPayloadDeneb)(nil), // 33: ethereum.engine.v1.ExecutionPayloadDeneb
|
||||
(*v1.ExecutionRequests)(nil), // 34: ethereum.engine.v1.ExecutionRequests
|
||||
(*ExecutionPayloadBid)(nil), // 0: ethereum.eth.v1alpha1.ExecutionPayloadBid
|
||||
(*SignedExecutionPayloadBid)(nil), // 1: ethereum.eth.v1alpha1.SignedExecutionPayloadBid
|
||||
(*PayloadAttestationData)(nil), // 2: ethereum.eth.v1alpha1.PayloadAttestationData
|
||||
(*PayloadAttestation)(nil), // 3: ethereum.eth.v1alpha1.PayloadAttestation
|
||||
(*PayloadAttestationMessage)(nil), // 4: ethereum.eth.v1alpha1.PayloadAttestationMessage
|
||||
(*BeaconBlockGloas)(nil), // 5: ethereum.eth.v1alpha1.BeaconBlockGloas
|
||||
(*BeaconBlockBodyGloas)(nil), // 6: ethereum.eth.v1alpha1.BeaconBlockBodyGloas
|
||||
(*SignedBeaconBlockGloas)(nil), // 7: ethereum.eth.v1alpha1.SignedBeaconBlockGloas
|
||||
(*BeaconStateGloas)(nil), // 8: ethereum.eth.v1alpha1.BeaconStateGloas
|
||||
(*BuilderPendingPayment)(nil), // 9: ethereum.eth.v1alpha1.BuilderPendingPayment
|
||||
(*BuilderPendingWithdrawal)(nil), // 10: ethereum.eth.v1alpha1.BuilderPendingWithdrawal
|
||||
(*DataColumnSidecarGloas)(nil), // 11: ethereum.eth.v1alpha1.DataColumnSidecarGloas
|
||||
(*ExecutionPayloadEnvelope)(nil), // 12: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope
|
||||
(*SignedExecutionPayloadEnvelope)(nil), // 13: ethereum.eth.v1alpha1.SignedExecutionPayloadEnvelope
|
||||
(*BlindedExecutionPayloadEnvelope)(nil), // 14: ethereum.eth.v1alpha1.BlindedExecutionPayloadEnvelope
|
||||
(*SignedBlindedExecutionPayloadEnvelope)(nil), // 15: ethereum.eth.v1alpha1.SignedBlindedExecutionPayloadEnvelope
|
||||
(*Builder)(nil), // 16: ethereum.eth.v1alpha1.Builder
|
||||
(*Eth1Data)(nil), // 17: ethereum.eth.v1alpha1.Eth1Data
|
||||
(*ProposerSlashing)(nil), // 18: ethereum.eth.v1alpha1.ProposerSlashing
|
||||
(*AttesterSlashingElectra)(nil), // 19: ethereum.eth.v1alpha1.AttesterSlashingElectra
|
||||
(*AttestationElectra)(nil), // 20: ethereum.eth.v1alpha1.AttestationElectra
|
||||
(*Deposit)(nil), // 21: ethereum.eth.v1alpha1.Deposit
|
||||
(*SignedVoluntaryExit)(nil), // 22: ethereum.eth.v1alpha1.SignedVoluntaryExit
|
||||
(*SyncAggregate)(nil), // 23: ethereum.eth.v1alpha1.SyncAggregate
|
||||
(*SignedBLSToExecutionChange)(nil), // 24: ethereum.eth.v1alpha1.SignedBLSToExecutionChange
|
||||
(*Fork)(nil), // 25: ethereum.eth.v1alpha1.Fork
|
||||
(*BeaconBlockHeader)(nil), // 26: ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
(*Validator)(nil), // 27: ethereum.eth.v1alpha1.Validator
|
||||
(*Checkpoint)(nil), // 28: ethereum.eth.v1alpha1.Checkpoint
|
||||
(*SyncCommittee)(nil), // 29: ethereum.eth.v1alpha1.SyncCommittee
|
||||
(*HistoricalSummary)(nil), // 30: ethereum.eth.v1alpha1.HistoricalSummary
|
||||
(*PendingDeposit)(nil), // 31: ethereum.eth.v1alpha1.PendingDeposit
|
||||
(*PendingPartialWithdrawal)(nil), // 32: ethereum.eth.v1alpha1.PendingPartialWithdrawal
|
||||
(*PendingConsolidation)(nil), // 33: ethereum.eth.v1alpha1.PendingConsolidation
|
||||
(*v1.Withdrawal)(nil), // 34: ethereum.engine.v1.Withdrawal
|
||||
(*v1.ExecutionPayloadDeneb)(nil), // 35: ethereum.engine.v1.ExecutionPayloadDeneb
|
||||
(*v1.ExecutionRequests)(nil), // 36: ethereum.engine.v1.ExecutionRequests
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_gloas_proto_depIdxs = []int32{
|
||||
0, // 0: ethereum.eth.v1alpha1.SignedExecutionPayloadBid.message:type_name -> ethereum.eth.v1alpha1.ExecutionPayloadBid
|
||||
2, // 1: ethereum.eth.v1alpha1.PayloadAttestation.data:type_name -> ethereum.eth.v1alpha1.PayloadAttestationData
|
||||
2, // 2: ethereum.eth.v1alpha1.PayloadAttestationMessage.data:type_name -> ethereum.eth.v1alpha1.PayloadAttestationData
|
||||
6, // 3: ethereum.eth.v1alpha1.BeaconBlockGloas.body:type_name -> ethereum.eth.v1alpha1.BeaconBlockBodyGloas
|
||||
15, // 4: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.eth1_data:type_name -> ethereum.eth.v1alpha1.Eth1Data
|
||||
16, // 5: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.proposer_slashings:type_name -> ethereum.eth.v1alpha1.ProposerSlashing
|
||||
17, // 6: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.attester_slashings:type_name -> ethereum.eth.v1alpha1.AttesterSlashingElectra
|
||||
18, // 7: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.attestations:type_name -> ethereum.eth.v1alpha1.AttestationElectra
|
||||
19, // 8: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.deposits:type_name -> ethereum.eth.v1alpha1.Deposit
|
||||
20, // 9: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.voluntary_exits:type_name -> ethereum.eth.v1alpha1.SignedVoluntaryExit
|
||||
21, // 10: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.sync_aggregate:type_name -> ethereum.eth.v1alpha1.SyncAggregate
|
||||
22, // 11: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.bls_to_execution_changes:type_name -> ethereum.eth.v1alpha1.SignedBLSToExecutionChange
|
||||
17, // 4: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.eth1_data:type_name -> ethereum.eth.v1alpha1.Eth1Data
|
||||
18, // 5: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.proposer_slashings:type_name -> ethereum.eth.v1alpha1.ProposerSlashing
|
||||
19, // 6: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.attester_slashings:type_name -> ethereum.eth.v1alpha1.AttesterSlashingElectra
|
||||
20, // 7: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.attestations:type_name -> ethereum.eth.v1alpha1.AttestationElectra
|
||||
21, // 8: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.deposits:type_name -> ethereum.eth.v1alpha1.Deposit
|
||||
22, // 9: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.voluntary_exits:type_name -> ethereum.eth.v1alpha1.SignedVoluntaryExit
|
||||
23, // 10: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.sync_aggregate:type_name -> ethereum.eth.v1alpha1.SyncAggregate
|
||||
24, // 11: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.bls_to_execution_changes:type_name -> ethereum.eth.v1alpha1.SignedBLSToExecutionChange
|
||||
1, // 12: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.signed_execution_payload_bid:type_name -> ethereum.eth.v1alpha1.SignedExecutionPayloadBid
|
||||
3, // 13: ethereum.eth.v1alpha1.BeaconBlockBodyGloas.payload_attestations:type_name -> ethereum.eth.v1alpha1.PayloadAttestation
|
||||
5, // 14: ethereum.eth.v1alpha1.SignedBeaconBlockGloas.block:type_name -> ethereum.eth.v1alpha1.BeaconBlockGloas
|
||||
23, // 15: ethereum.eth.v1alpha1.BeaconStateGloas.fork:type_name -> ethereum.eth.v1alpha1.Fork
|
||||
24, // 16: ethereum.eth.v1alpha1.BeaconStateGloas.latest_block_header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
15, // 17: ethereum.eth.v1alpha1.BeaconStateGloas.eth1_data:type_name -> ethereum.eth.v1alpha1.Eth1Data
|
||||
15, // 18: ethereum.eth.v1alpha1.BeaconStateGloas.eth1_data_votes:type_name -> ethereum.eth.v1alpha1.Eth1Data
|
||||
25, // 19: ethereum.eth.v1alpha1.BeaconStateGloas.validators:type_name -> ethereum.eth.v1alpha1.Validator
|
||||
26, // 20: ethereum.eth.v1alpha1.BeaconStateGloas.previous_justified_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
26, // 21: ethereum.eth.v1alpha1.BeaconStateGloas.current_justified_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
26, // 22: ethereum.eth.v1alpha1.BeaconStateGloas.finalized_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
27, // 23: ethereum.eth.v1alpha1.BeaconStateGloas.current_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
27, // 24: ethereum.eth.v1alpha1.BeaconStateGloas.next_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
25, // 15: ethereum.eth.v1alpha1.BeaconStateGloas.fork:type_name -> ethereum.eth.v1alpha1.Fork
|
||||
26, // 16: ethereum.eth.v1alpha1.BeaconStateGloas.latest_block_header:type_name -> ethereum.eth.v1alpha1.BeaconBlockHeader
|
||||
17, // 17: ethereum.eth.v1alpha1.BeaconStateGloas.eth1_data:type_name -> ethereum.eth.v1alpha1.Eth1Data
|
||||
17, // 18: ethereum.eth.v1alpha1.BeaconStateGloas.eth1_data_votes:type_name -> ethereum.eth.v1alpha1.Eth1Data
|
||||
27, // 19: ethereum.eth.v1alpha1.BeaconStateGloas.validators:type_name -> ethereum.eth.v1alpha1.Validator
|
||||
28, // 20: ethereum.eth.v1alpha1.BeaconStateGloas.previous_justified_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
28, // 21: ethereum.eth.v1alpha1.BeaconStateGloas.current_justified_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
28, // 22: ethereum.eth.v1alpha1.BeaconStateGloas.finalized_checkpoint:type_name -> ethereum.eth.v1alpha1.Checkpoint
|
||||
29, // 23: ethereum.eth.v1alpha1.BeaconStateGloas.current_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
29, // 24: ethereum.eth.v1alpha1.BeaconStateGloas.next_sync_committee:type_name -> ethereum.eth.v1alpha1.SyncCommittee
|
||||
0, // 25: ethereum.eth.v1alpha1.BeaconStateGloas.latest_execution_payload_bid:type_name -> ethereum.eth.v1alpha1.ExecutionPayloadBid
|
||||
28, // 26: ethereum.eth.v1alpha1.BeaconStateGloas.historical_summaries:type_name -> ethereum.eth.v1alpha1.HistoricalSummary
|
||||
29, // 27: ethereum.eth.v1alpha1.BeaconStateGloas.pending_deposits:type_name -> ethereum.eth.v1alpha1.PendingDeposit
|
||||
30, // 28: ethereum.eth.v1alpha1.BeaconStateGloas.pending_partial_withdrawals:type_name -> ethereum.eth.v1alpha1.PendingPartialWithdrawal
|
||||
31, // 29: ethereum.eth.v1alpha1.BeaconStateGloas.pending_consolidations:type_name -> ethereum.eth.v1alpha1.PendingConsolidation
|
||||
14, // 30: ethereum.eth.v1alpha1.BeaconStateGloas.builders:type_name -> ethereum.eth.v1alpha1.Builder
|
||||
30, // 26: ethereum.eth.v1alpha1.BeaconStateGloas.historical_summaries:type_name -> ethereum.eth.v1alpha1.HistoricalSummary
|
||||
31, // 27: ethereum.eth.v1alpha1.BeaconStateGloas.pending_deposits:type_name -> ethereum.eth.v1alpha1.PendingDeposit
|
||||
32, // 28: ethereum.eth.v1alpha1.BeaconStateGloas.pending_partial_withdrawals:type_name -> ethereum.eth.v1alpha1.PendingPartialWithdrawal
|
||||
33, // 29: ethereum.eth.v1alpha1.BeaconStateGloas.pending_consolidations:type_name -> ethereum.eth.v1alpha1.PendingConsolidation
|
||||
16, // 30: ethereum.eth.v1alpha1.BeaconStateGloas.builders:type_name -> ethereum.eth.v1alpha1.Builder
|
||||
9, // 31: ethereum.eth.v1alpha1.BeaconStateGloas.builder_pending_payments:type_name -> ethereum.eth.v1alpha1.BuilderPendingPayment
|
||||
10, // 32: ethereum.eth.v1alpha1.BeaconStateGloas.builder_pending_withdrawals:type_name -> ethereum.eth.v1alpha1.BuilderPendingWithdrawal
|
||||
32, // 33: ethereum.eth.v1alpha1.BeaconStateGloas.payload_expected_withdrawals:type_name -> ethereum.engine.v1.Withdrawal
|
||||
34, // 33: ethereum.eth.v1alpha1.BeaconStateGloas.payload_expected_withdrawals:type_name -> ethereum.engine.v1.Withdrawal
|
||||
10, // 34: ethereum.eth.v1alpha1.BuilderPendingPayment.withdrawal:type_name -> ethereum.eth.v1alpha1.BuilderPendingWithdrawal
|
||||
33, // 35: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
|
||||
34, // 36: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
|
||||
35, // 35: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
|
||||
36, // 36: ethereum.eth.v1alpha1.ExecutionPayloadEnvelope.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
|
||||
12, // 37: ethereum.eth.v1alpha1.SignedExecutionPayloadEnvelope.message:type_name -> ethereum.eth.v1alpha1.ExecutionPayloadEnvelope
|
||||
38, // [38:38] is the sub-list for method output_type
|
||||
38, // [38:38] is the sub-list for method input_type
|
||||
38, // [38:38] is the sub-list for extension type_name
|
||||
38, // [38:38] is the sub-list for extension extendee
|
||||
0, // [0:38] is the sub-list for field type_name
|
||||
36, // 38: ethereum.eth.v1alpha1.BlindedExecutionPayloadEnvelope.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
|
||||
14, // 39: ethereum.eth.v1alpha1.SignedBlindedExecutionPayloadEnvelope.message:type_name -> ethereum.eth.v1alpha1.BlindedExecutionPayloadEnvelope
|
||||
40, // [40:40] is the sub-list for method output_type
|
||||
40, // [40:40] is the sub-list for method input_type
|
||||
40, // [40:40] is the sub-list for extension type_name
|
||||
40, // [40:40] is the sub-list for extension extendee
|
||||
0, // [0:40] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_prysm_v1alpha1_gloas_proto_init() }
|
||||
@@ -2158,7 +2336,7 @@ func file_proto_prysm_v1alpha1_gloas_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_gloas_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 15,
|
||||
NumMessages: 17,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
||||
@@ -427,6 +427,29 @@ message SignedExecutionPayloadEnvelope {
|
||||
bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
|
||||
}
|
||||
|
||||
// BlindedExecutionPayloadEnvelope is a storage-only envelope that replaces the
|
||||
// full execution payload with its block hash for space savings. The full payload
|
||||
// can be retrieved from the EL via engine_getPayloadBodiesByHash using the block_hash.
|
||||
message BlindedExecutionPayloadEnvelope {
|
||||
bytes block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
ethereum.engine.v1.ExecutionRequests execution_requests = 2;
|
||||
uint64 builder_index = 3 [ (ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/"
|
||||
"consensus-types/primitives.BuilderIndex" ];
|
||||
bytes beacon_block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
uint64 slot = 5 [
|
||||
(ethereum.eth.ext.cast_type) =
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
|
||||
];
|
||||
bytes state_root = 6 [ (ethereum.eth.ext.ssz_size) = "32" ];
|
||||
}
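Since only block_hash is stored, a full envelope has to be rebuilt by asking the execution layer for the payload behind that hash, as the comment above describes. A hypothetical Go sketch of that unblinding step follows; PayloadBodyFetcher and unblindEnvelope are made-up names, the import paths are assumed, and this is not Prysm's actual unblinding code:

package gloas

import (
	"context"

	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// PayloadBodyFetcher is a hypothetical stand-in for an engine-API client that
// exposes engine_getPayloadBodiesByHash; it is not an existing Prysm interface.
type PayloadBodyFetcher interface {
	PayloadBodyByHash(ctx context.Context, blockHash []byte) (*enginev1.ExecutionPayloadDeneb, error)
}

// unblindEnvelope rebuilds a full envelope from a stored blinded one by asking
// the execution layer for the payload matching block_hash.
func unblindEnvelope(ctx context.Context, f PayloadBodyFetcher, b *eth.BlindedExecutionPayloadEnvelope) (*eth.ExecutionPayloadEnvelope, error) {
	payload, err := f.PayloadBodyByHash(ctx, b.BlockHash)
	if err != nil {
		return nil, err
	}
	return &eth.ExecutionPayloadEnvelope{
		Payload:           payload,
		ExecutionRequests: b.ExecutionRequests,
		BuilderIndex:      b.BuilderIndex,
		BeaconBlockRoot:   b.BeaconBlockRoot,
		Slot:              b.Slot,
		StateRoot:         b.StateRoot,
	}, nil
}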
|
||||
|
||||
// SignedBlindedExecutionPayloadEnvelope wraps a blinded execution payload envelope with a signature.
|
||||
message SignedBlindedExecutionPayloadEnvelope {
|
||||
BlindedExecutionPayloadEnvelope message = 1;
|
||||
bytes signature = 2 [ (ethereum.eth.ext.ssz_size) = "96" ];
|
||||
}
|
||||
|
||||
// Builder represents a builder in the Gloas fork.
|
||||
//
|
||||
// Spec:
|
||||
|
||||
@@ -3586,6 +3586,282 @@ func (s *SignedExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err e
return
}

// MarshalSSZ ssz marshals the BlindedExecutionPayloadEnvelope object
func (b *BlindedExecutionPayloadEnvelope) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}

// MarshalSSZTo ssz marshals the BlindedExecutionPayloadEnvelope object to a target array
func (b *BlindedExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(116)

// Field (0) 'BlockHash'
if size := len(b.BlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
return
}
dst = append(dst, b.BlockHash...)

// Offset (1) 'ExecutionRequests'
dst = ssz.WriteOffset(dst, offset)
if b.ExecutionRequests == nil {
b.ExecutionRequests = new(v1.ExecutionRequests)
}
offset += b.ExecutionRequests.SizeSSZ()

// Field (2) 'BuilderIndex'
dst = ssz.MarshalUint64(dst, uint64(b.BuilderIndex))

// Field (3) 'BeaconBlockRoot'
if size := len(b.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
}
dst = append(dst, b.BeaconBlockRoot...)

// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(b.Slot))

// Field (5) 'StateRoot'
if size := len(b.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return
}
dst = append(dst, b.StateRoot...)

// Field (1) 'ExecutionRequests'
if dst, err = b.ExecutionRequests.MarshalSSZTo(dst); err != nil {
return
}

return
}

// UnmarshalSSZ ssz unmarshals the BlindedExecutionPayloadEnvelope object
func (b *BlindedExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 116 {
return ssz.ErrSize
}

tail := buf
var o1 uint64

// Field (0) 'BlockHash'
if cap(b.BlockHash) == 0 {
b.BlockHash = make([]byte, 0, len(buf[0:32]))
}
b.BlockHash = append(b.BlockHash, buf[0:32]...)

// Offset (1) 'ExecutionRequests'
if o1 = ssz.ReadOffset(buf[32:36]); o1 > size {
return ssz.ErrOffset
}

if o1 != 116 {
return ssz.ErrInvalidVariableOffset
}

// Field (2) 'BuilderIndex'
b.BuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[36:44]))

// Field (3) 'BeaconBlockRoot'
if cap(b.BeaconBlockRoot) == 0 {
b.BeaconBlockRoot = make([]byte, 0, len(buf[44:76]))
}
b.BeaconBlockRoot = append(b.BeaconBlockRoot, buf[44:76]...)

// Field (4) 'Slot'
b.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[76:84]))

// Field (5) 'StateRoot'
if cap(b.StateRoot) == 0 {
b.StateRoot = make([]byte, 0, len(buf[84:116]))
}
b.StateRoot = append(b.StateRoot, buf[84:116]...)

// Field (1) 'ExecutionRequests'
{
buf = tail[o1:]
if b.ExecutionRequests == nil {
b.ExecutionRequests = new(v1.ExecutionRequests)
}
if err = b.ExecutionRequests.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the BlindedExecutionPayloadEnvelope object
func (b *BlindedExecutionPayloadEnvelope) SizeSSZ() (size int) {
size = 116

// Field (1) 'ExecutionRequests'
if b.ExecutionRequests == nil {
b.ExecutionRequests = new(v1.ExecutionRequests)
}
size += b.ExecutionRequests.SizeSSZ()

return
}

// HashTreeRoot ssz hashes the BlindedExecutionPayloadEnvelope object
func (b *BlindedExecutionPayloadEnvelope) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}

// HashTreeRootWith ssz hashes the BlindedExecutionPayloadEnvelope object with a hasher
func (b *BlindedExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()

// Field (0) 'BlockHash'
if size := len(b.BlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
return
}
hh.PutBytes(b.BlockHash)

// Field (1) 'ExecutionRequests'
if err = b.ExecutionRequests.HashTreeRootWith(hh); err != nil {
return
}

// Field (2) 'BuilderIndex'
hh.PutUint64(uint64(b.BuilderIndex))

// Field (3) 'BeaconBlockRoot'
if size := len(b.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
}
hh.PutBytes(b.BeaconBlockRoot)

// Field (4) 'Slot'
hh.PutUint64(uint64(b.Slot))

// Field (5) 'StateRoot'
if size := len(b.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return
}
hh.PutBytes(b.StateRoot)

hh.Merkleize(indx)
return
}
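
The generated methods above fix the envelope's static SSZ footprint at 116 bytes (32 block_hash + 4 offset + 8 builder_index + 32 beacon_block_root + 8 slot + 32 state_root), with the variable-length ExecutionRequests serialized after offset 116. A minimal round-trip sketch using these generated methods follows; the ethpb and enginev1 import aliases are assumptions for illustration, not part of this diff.

// roundTripBlindedEnvelope is an illustrative sketch (not part of this diff)
// that encodes a BlindedExecutionPayloadEnvelope with the generated SSZ methods,
// decodes it back, and returns the hash tree root of the decoded copy.
func roundTripBlindedEnvelope() ([32]byte, error) {
	env := &ethpb.BlindedExecutionPayloadEnvelope{
		BlockHash:         make([]byte, 32), // fixed-size fields must be exactly 32 bytes
		ExecutionRequests: &enginev1.ExecutionRequests{},
		BuilderIndex:      1,
		BeaconBlockRoot:   make([]byte, 32),
		Slot:              2,
		StateRoot:         make([]byte, 32),
	}
	enc, err := env.MarshalSSZ()
	if err != nil {
		return [32]byte{}, err
	}
	decoded := &ethpb.BlindedExecutionPayloadEnvelope{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		return [32]byte{}, err
	}
	return decoded.HashTreeRoot()
}
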

// MarshalSSZ ssz marshals the SignedBlindedExecutionPayloadEnvelope object
func (s *SignedBlindedExecutionPayloadEnvelope) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}

// MarshalSSZTo ssz marshals the SignedBlindedExecutionPayloadEnvelope object to a target array
func (s *SignedBlindedExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(100)

// Offset (0) 'Message'
dst = ssz.WriteOffset(dst, offset)
if s.Message == nil {
s.Message = new(BlindedExecutionPayloadEnvelope)
}
offset += s.Message.SizeSSZ()

// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
dst = append(dst, s.Signature...)

// Field (0) 'Message'
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}

return
}

// UnmarshalSSZ ssz unmarshals the SignedBlindedExecutionPayloadEnvelope object
func (s *SignedBlindedExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 100 {
return ssz.ErrSize
}

tail := buf
var o0 uint64

// Offset (0) 'Message'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}

if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}

// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[4:100]))
}
s.Signature = append(s.Signature, buf[4:100]...)

// Field (0) 'Message'
{
buf = tail[o0:]
if s.Message == nil {
s.Message = new(BlindedExecutionPayloadEnvelope)
}
if err = s.Message.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}

// SizeSSZ returns the ssz encoded size in bytes for the SignedBlindedExecutionPayloadEnvelope object
func (s *SignedBlindedExecutionPayloadEnvelope) SizeSSZ() (size int) {
size = 100

// Field (0) 'Message'
if s.Message == nil {
s.Message = new(BlindedExecutionPayloadEnvelope)
}
size += s.Message.SizeSSZ()

return
}

// HashTreeRoot ssz hashes the SignedBlindedExecutionPayloadEnvelope object
func (s *SignedBlindedExecutionPayloadEnvelope) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}

// HashTreeRootWith ssz hashes the SignedBlindedExecutionPayloadEnvelope object with a hasher
func (s *SignedBlindedExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()

// Field (0) 'Message'
if err = s.Message.HashTreeRootWith(hh); err != nil {
return
}

// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
hh.PutBytes(s.Signature)

hh.Merkleize(indx)
return
}

// MarshalSSZ ssz marshals the Builder object
func (b *Builder) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)