Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-11 21:45:12 -05:00)

Compare commits: gloas/fork ... process-ex (10 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 78d6d158a5 |  |
|  | b1e0a287b7 |  |
|  | 30a561099a |  |
|  | ed85456661 |  |
|  | ebf0bd1164 |  |
|  | f6863bf22c |  |
|  | 7608930a61 |  |
|  | 3a63f6780b |  |
|  | 69f6d32c01 |  |
|  | fb4847deaa |  |
@@ -27,7 +27,6 @@ go_library(
        "receive_blob.go",
        "receive_block.go",
        "receive_data_column.go",
        "receive_payload_attestation_message.go",
        "service.go",
        "setup_forkchoice.go",
        "tracked_proposer.go",
@@ -46,7 +46,6 @@ type ForkchoiceFetcher interface {
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
	InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
	InsertPayload(context.Context, interfaces.ROExecutionPayloadEnvelope) error
	ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
	NewSlot(context.Context, primitives.Slot) error
	ProposerBoost() [32]byte
@@ -7,7 +7,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -56,13 +55,6 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block co
	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}

// InsertPayload is a wrapper for payload insertion which is self locked
func (s *Service) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	return s.cfg.ForkChoiceStore.InsertPayload(ctx, pe)
}

// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
	s.cfg.ForkChoiceStore.RLock()
@@ -101,16 +101,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
	if len(lastValidHash) == 0 {
		lastValidHash = defaultLatestValidHash
	}
	// this call has guaranteed to have the `headRoot` with its payload in forkchoice.
	invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
	invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
	if err != nil {
		log.WithError(err).Error("Could not set head root to invalid")
		return nil, nil
	}
	// TODO: Gloas, we should not include the head root in this call
	if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
		invalidRoots = append([][32]byte{headRoot}, invalidRoots...)
	}
	if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
		log.WithError(err).Error("Could not remove invalid block and state")
		return nil, nil
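The hunk above keeps the Gloas TODO of prepending the head root to the invalid set. A minimal, self-contained sketch of that prepend rule, with types reduced to plain `[32]byte`; the `ensureHeadFirst` name is illustrative and not part of the diff:

```go
package main

import "fmt"

// ensureHeadFirst mirrors the head-root handling in notifyForkchoiceUpdate above:
// the head root is prepended unless forkchoice already reported it first.
func ensureHeadFirst(headRoot [32]byte, invalidRoots [][32]byte) [][32]byte {
	if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
		return append([][32]byte{headRoot}, invalidRoots...)
	}
	return invalidRoots
}

func main() {
	head := [32]byte{'D'}
	fmt.Println(len(ensureHeadFirst(head, nil)))                      // 1: head prepended
	fmt.Println(len(ensureHeadFirst(head, [][32]byte{{'D'}, {'C'}}))) // 2: head already first
}
```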
@@ -295,10 +290,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
		return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
	}

// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
	newPayloadInvalidNodeCount.Inc()
	invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
	invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
	if err != nil {
		return err
	}
@@ -465,9 +465,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, brd, headRoot)

	// Ensure F and G's full nodes were removed but their empty (consensus) nodes remain, as does E
	require.Equal(t, true, fcs.HasNode(brf))
	require.Equal(t, true, fcs.HasNode(brg))
	// Ensure F and G where removed but their parent E wasn't
	require.Equal(t, false, fcs.HasNode(brf))
	require.Equal(t, false, fcs.HasNode(brg))
	require.Equal(t, true, fcs.HasNode(bre))
}
@@ -703,13 +703,14 @@ func Test_reportInvalidBlock(t *testing.T) {
	require.NoError(t, fcs.InsertNode(ctx, st, root))

	require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
	err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'c'}, [32]byte{'a'})
	err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
	require.Equal(t, IsInvalidBlock(err), true)
	require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
	invalidRoots := InvalidAncestorRoots(err)
	require.Equal(t, 2, len(invalidRoots))
	require.Equal(t, 3, len(invalidRoots))
	require.Equal(t, [32]byte{'D'}, invalidRoots[0])
	require.Equal(t, [32]byte{'C'}, invalidRoots[1])
	require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}

func Test_GetPayloadAttribute(t *testing.T) {
@@ -784,7 +785,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
}

func Test_GetPayloadAttributeV3(t *testing.T) {
	testCases := []struct {
	var testCases = []struct {
		name string
		st   bstate.BeaconState
	}{
@@ -232,8 +232,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
			postVersionAndHeaders[i].version,
			postVersionAndHeaders[i].header, b)
		if err != nil {
			// this call does not have the root in forkchoice yet.
			return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
			return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
		}
		if isValidPayload {
			if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -993,9 +992,9 @@ func (s *Service) waitForSync() error {
	}
}

func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
	if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
	}
	return err
}
@@ -2006,7 +2006,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
	// Check that we have justified the second epoch
	jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
	require.Equal(t, primitives.Epoch(2), jc.Epoch)
	time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed

	// import block 19 to find out that the whole chain 13--18 was in fact
	// invalid
@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
	if err != nil {
		s.cfg.ForkChoiceStore.Lock()
		err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.BlockHash()))
		err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
		s.cfg.ForkChoiceStore.Unlock()
		return false, err
	}
@@ -1,19 +0,0 @@
package blockchain

import (
	"context"

	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// PayloadAttestationReceiver interface defines the methods of chain service for receiving
// validated payload attestation messages.
type PayloadAttestationReceiver interface {
	ReceivePayloadAttestationMessage(context.Context, *ethpb.PayloadAttestationMessage) error
}

// ReceivePayloadAttestationMessage accepts a payload attestation message.
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error {
	// TODO: Handle payload attestation message processing once Gloas is fully wired.
	return nil
}
@@ -700,14 +700,6 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, blo
	return nil
}

// InsertPayload mocks the same method in the chain service
func (s *ChainService) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
	if s.ForkChoiceStore != nil {
		return s.ForkChoiceStore.InsertPayload(ctx, pe)
	}
	return nil
}

// ForkChoiceDump mocks the same method in the chain service
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
	if s.ForkChoiceStore != nil {
@@ -765,11 +757,6 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
	return nil
}

// ReceivePayloadAttestationMessage implements the same method in the chain service.
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
	return nil
}

// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
	return c.TargetRoot, nil
beacon-chain/cache/BUILD.bazel (vendored, 2 changed lines)
@@ -17,7 +17,6 @@ go_library(
        "error.go",
        "interfaces.go",
        "log.go",
        "payload_attestation.go",
        "payload_id.go",
        "proposer_indices.go",
        "proposer_indices_disabled.go", # keep
@@ -77,7 +76,6 @@ go_test(
        "checkpoint_state_test.go",
        "committee_fuzz_test.go",
        "committee_test.go",
        "payload_attestation_test.go",
        "payload_id_test.go",
        "private_access_test.go",
        "proposer_indices_test.go",
beacon-chain/cache/payload_attestation.go (vendored, 53 changed lines)
@@ -1,53 +0,0 @@
package cache

import (
	"sync"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// PayloadAttestationCache tracks seen payload attestation messages for a single slot.
type PayloadAttestationCache struct {
	slot primitives.Slot
	seen map[primitives.ValidatorIndex]struct{}
	mu   sync.RWMutex
}

// Seen returns true if a vote for the given slot has already been
// processed for this validator index.
func (p *PayloadAttestationCache) Seen(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
	p.mu.RLock()
	defer p.mu.RUnlock()
	if p.slot != slot {
		return false
	}
	if p.seen == nil {
		return false
	}
	_, ok := p.seen[idx]
	return ok
}

// Add marks the given slot and validator index as seen.
// This function assumes that the message has already been validated.
func (p *PayloadAttestationCache) Add(slot primitives.Slot, idx primitives.ValidatorIndex) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.slot != slot {
		p.slot = slot
		p.seen = make(map[primitives.ValidatorIndex]struct{})
	}
	if p.seen == nil {
		p.seen = make(map[primitives.ValidatorIndex]struct{})
	}
	p.seen[idx] = struct{}{}
	return nil
}

// Clear clears the internal cache.
func (p *PayloadAttestationCache) Clear() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.slot = 0
	p.seen = nil
}
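For readers skimming the removal above, here is a minimal, dependency-free sketch of the slot-scoped dedup behaviour the deleted cache documented (Add implicitly forgets entries once the slot advances). The `seenCache` type and plain `uint64` keys are illustrative only, not part of the Prysm API:

```go
package main

import "fmt"

type seenCache struct {
	slot uint64
	seen map[uint64]struct{}
}

// add records a validator index for a slot, resetting the set when the slot changes.
func (c *seenCache) add(slot, idx uint64) {
	if c.slot != slot || c.seen == nil {
		c.slot = slot
		c.seen = map[uint64]struct{}{}
	}
	c.seen[idx] = struct{}{}
}

// has reports whether the index was recorded for exactly this slot.
func (c *seenCache) has(slot, idx uint64) bool {
	if c.slot != slot {
		return false
	}
	_, ok := c.seen[idx]
	return ok
}

func main() {
	var c seenCache
	c.add(1, 3)
	fmt.Println(c.has(1, 3)) // true
	c.add(2, 3)
	fmt.Println(c.has(1, 3)) // false: slot 1 entries were dropped
}
```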
beacon-chain/cache/payload_attestation_test.go (vendored, 48 changed lines)
@@ -1,48 +0,0 @@
package cache_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/stretchr/testify/require"
)

func TestPayloadAttestationCache_SeenAndAdd(t *testing.T) {
	var c cache.PayloadAttestationCache
	slot1 := primitives.Slot(1)
	slot2 := primitives.Slot(2)
	idx1 := primitives.ValidatorIndex(3)
	idx2 := primitives.ValidatorIndex(4)

	require.False(t, c.Seen(slot1, idx1))

	require.NoError(t, c.Add(slot1, idx1))
	require.True(t, c.Seen(slot1, idx1))
	require.False(t, c.Seen(slot1, idx2))
	require.False(t, c.Seen(slot2, idx1))

	require.NoError(t, c.Add(slot1, idx2))
	require.True(t, c.Seen(slot1, idx1))
	require.True(t, c.Seen(slot1, idx2))

	require.NoError(t, c.Add(slot2, idx1))
	require.True(t, c.Seen(slot2, idx1))
	require.False(t, c.Seen(slot1, idx1))
	require.False(t, c.Seen(slot1, idx2))
}

func TestPayloadAttestationCache_Clear(t *testing.T) {
	var c cache.PayloadAttestationCache
	slot := primitives.Slot(10)
	idx := primitives.ValidatorIndex(42)

	require.NoError(t, c.Add(slot, idx))
	require.True(t, c.Seen(slot, idx))

	c.Clear()
	require.False(t, c.Seen(slot, idx))

	require.NoError(t, c.Add(slot, idx))
	require.True(t, c.Seen(slot, idx))
}
@@ -4,6 +4,9 @@ go_library(
    name = "go_default_library",
    srcs = [
        "bid.go",
        "deposit_request.go",
        "log.go",
        "payload.go",
        "payload_attestation.go",
        "pending_payment.go",
        "proposer_slashing.go",
@@ -12,6 +15,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/requests:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -25,9 +29,12 @@ go_library(
        "//crypto/bls/common:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

@@ -35,7 +42,9 @@ go_test(
    name = "go_default_test",
    srcs = [
        "bid_test.go",
        "deposit_request_test.go",
        "payload_attestation_test.go",
        "payload_test.go",
        "pending_payment_test.go",
        "proposer_slashing_test.go",
    ],
@@ -45,6 +54,7 @@ go_test(
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/testing:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
beacon-chain/core/gloas/deposit_request.go (new file, 180 lines)
@@ -0,0 +1,180 @@
package gloas

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

func processDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) error {
	if len(requests) == 0 {
		return nil
	}

	for _, receipt := range requests {
		if err := processDepositRequest(beaconState, receipt); err != nil {
			return errors.Wrap(err, "could not apply deposit request")
		}
	}
	return nil
}

// processDepositRequest processes the specific deposit request
//
// <spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//     # [New in Gloas:EIP7732]
//     builder_pubkeys = [b.pubkey for b in state.builders]
//     validator_pubkeys = [v.pubkey for v in state.validators]
//
//     # [New in Gloas:EIP7732]
//     # Regardless of the withdrawal credentials prefix, if a builder/validator
//     # already exists with this pubkey, apply the deposit to their balance
//     is_builder = deposit_request.pubkey in builder_pubkeys
//     is_validator = deposit_request.pubkey in validator_pubkeys
//     is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
//     if is_builder or (is_builder_prefix and not is_validator):
//         # Apply builder deposits immediately
//         apply_deposit_for_builder(
//             state,
//             deposit_request.pubkey,
//             deposit_request.withdrawal_credentials,
//             deposit_request.amount,
//             deposit_request.signature,
//             state.slot,
//         )
//         return
//
//     # Add validator deposits to the queue
//     state.pending_deposits.append(
//         PendingDeposit(
//             pubkey=deposit_request.pubkey,
//             withdrawal_credentials=deposit_request.withdrawal_credentials,
//             amount=deposit_request.amount,
//             signature=deposit_request.signature,
//             slot=state.slot,
//         )
//     )
// </spec>
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
	if request == nil {
		return errors.New("nil deposit request")
	}

	applied, err := applyBuilderDepositRequest(beaconState, request)
	if err != nil {
		return errors.Wrap(err, "could not apply builder deposit")
	}
	if applied {
		return nil
	}

	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
		PublicKey:             request.Pubkey,
		WithdrawalCredentials: request.WithdrawalCredentials,
		Amount:                request.Amount,
		Signature:             request.Signature,
		Slot:                  beaconState.Slot(),
	}); err != nil {
		return errors.Wrap(err, "could not append deposit request")
	}
	return nil
}

// <spec fn="apply_deposit_for_builder" fork="gloas" hash="e4bc98c7">
// def apply_deposit_for_builder(
//
//	state: BeaconState,
//	pubkey: BLSPubkey,
//	withdrawal_credentials: Bytes32,
//	amount: uint64,
//	signature: BLSSignature,
//	slot: Slot,
//
// ) -> None:
//
//	builder_pubkeys = [b.pubkey for b in state.builders]
//	if pubkey not in builder_pubkeys:
//	    # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
//	    if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
//	        add_builder_to_registry(state, pubkey, withdrawal_credentials, amount, slot)
//	else:
//	    # Increase balance by deposit amount
//	    builder_index = builder_pubkeys.index(pubkey)
//	    state.builders[builder_index].balance += amount
//
// </spec>
func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (bool, error) {
	if beaconState.Version() < version.Gloas {
		return false, nil
	}

	pubkey := bytesutil.ToBytes48(request.Pubkey)
	_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
	idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
	isBuilderPrefix := IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
	if !isBuilder && (!isBuilderPrefix || isValidator) {
		return false, nil
	}

	if isBuilder {
		if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
			return false, err
		}
		return true, nil
	}

	if err := applyDepositForNewBuilder(
		beaconState,
		request.Pubkey,
		request.WithdrawalCredentials,
		request.Amount,
		request.Signature,
	); err != nil {
		return false, err
	}
	return true, nil
}

func applyDepositForNewBuilder(
	beaconState state.BeaconState,
	pubkey []byte,
	withdrawalCredentials []byte,
	amount uint64,
	signature []byte,
) error {
	pubkeyBytes := bytesutil.ToBytes48(pubkey)
	valid, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
		PublicKey:             pubkey,
		WithdrawalCredentials: withdrawalCredentials,
		Amount:                amount,
		Signature:             signature,
	})
	if err != nil {
		return errors.Wrap(err, "could not verify deposit signature")
	}
	if !valid {
		log.WithFields(logrus.Fields{
			"pubkey": fmt.Sprintf("%x", pubkey),
		}).Warn("ignoring builder deposit: invalid signature")
		return nil
	}

	withdrawalCredBytes := bytesutil.ToBytes32(withdrawalCredentials)
	return beaconState.AddBuilderFromDeposit(pubkeyBytes, withdrawalCredBytes, amount)
}

func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
	return len(withdrawalCredentials) == fieldparams.RootLength &&
		withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
}
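A compact, self-contained sketch of the routing rule the spec comment above describes: a deposit request goes to the builder side when the pubkey is already a builder, or carries the builder withdrawal prefix without already being a validator; everything else is queued as a pending deposit. The prefix byte `0x0b` and the `routeDeposit` helper are assumptions for illustration, not values taken from this diff:

```go
package main

import "fmt"

const builderWithdrawalPrefix = byte(0x0b) // assumed prefix for the example only

func routeDeposit(isBuilder, isValidator bool, withdrawalCredentials []byte) string {
	isBuilderPrefix := len(withdrawalCredentials) == 32 &&
		withdrawalCredentials[0] == builderWithdrawalPrefix
	if isBuilder || (isBuilderPrefix && !isValidator) {
		return "apply immediately to builder registry/balance"
	}
	return "append to pending deposits queue"
}

func main() {
	creds := make([]byte, 32)
	creds[0] = builderWithdrawalPrefix
	fmt.Println(routeDeposit(false, false, creds)) // builder path
	fmt.Println(routeDeposit(false, true, creds))  // pending deposit: pubkey already a validator
}
```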
beacon-chain/core/gloas/deposit_request_test.go (new file, 150 lines)
@@ -0,0 +1,150 @@
package gloas

import (
	"bytes"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestProcessDepositRequests_EmptyAndNil(t *testing.T) {
	st := newGloasState(t, nil, nil)

	t.Run("empty requests continues", func(t *testing.T) {
		err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
		require.NoError(t, err)
	})

	t.Run("nil request errors", func(t *testing.T) {
		err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
		require.ErrorContains(t, "nil deposit request", err)
	})
}

func TestProcessDepositRequest_BuilderDepositAddsBuilder(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)

	cred := builderWithdrawalCredentials()
	pd := stateTesting.GeneratePendingDeposit(t, sk, 1234, cred, 0)
	req := depositRequestFromPending(pd, 1)

	st := newGloasState(t, nil, nil)
	err = processDepositRequest(st, req)
	require.NoError(t, err)

	idx, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
	require.Equal(t, true, ok)

	builder, err := st.Builder(idx)
	require.NoError(t, err)
	require.NotNil(t, builder)
	require.DeepEqual(t, req.Pubkey, builder.Pubkey)
	require.DeepEqual(t, []byte{cred[0]}, builder.Version)
	require.DeepEqual(t, cred[12:], builder.ExecutionAddress)
	require.Equal(t, uint64(1234), uint64(builder.Balance))
	require.Equal(t, params.BeaconConfig().FarFutureEpoch, builder.WithdrawableEpoch)

	pending, err := st.PendingDeposits()
	require.NoError(t, err)
	require.Equal(t, 0, len(pending))
}

func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)

	pubkey := sk.PublicKey().Marshal()
	builders := []*ethpb.Builder{
		{
			Pubkey:            pubkey,
			Version:           []byte{0},
			ExecutionAddress:  bytes.Repeat([]byte{0x11}, 20),
			Balance:           5,
			WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
		},
	}
	st := newGloasState(t, nil, builders)

	cred := validatorWithdrawalCredentials()
	pd := stateTesting.GeneratePendingDeposit(t, sk, 200, cred, 0)
	req := depositRequestFromPending(pd, 9)

	err = processDepositRequest(st, req)
	require.NoError(t, err)

	idx, ok := st.BuilderIndexByPubkey(toBytes48(pubkey))
	require.Equal(t, true, ok)
	builder, err := st.Builder(idx)
	require.NoError(t, err)
	require.Equal(t, uint64(205), uint64(builder.Balance))

	pending, err := st.PendingDeposits()
	require.NoError(t, err)
	require.Equal(t, 0, len(pending))
}

func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)

	cred := builderWithdrawalCredentials()
	st := newGloasState(t, nil, nil)
	err = applyDepositForNewBuilder(st, sk.PublicKey().Marshal(), cred[:], 100, make([]byte, 96))
	require.NoError(t, err)

	_, ok := st.BuilderIndexByPubkey(toBytes48(sk.PublicKey().Marshal()))
	require.Equal(t, false, ok)
}

func newGloasState(t *testing.T, validators []*ethpb.Validator, builders []*ethpb.Builder) state.BeaconState {
	t.Helper()

	st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
		DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
		Validators:                validators,
		Balances:                  make([]uint64, len(validators)),
		PendingDeposits:           []*ethpb.PendingDeposit{},
		Builders:                  builders,
	})
	require.NoError(t, err)

	return st
}

func depositRequestFromPending(pd *ethpb.PendingDeposit, index uint64) *enginev1.DepositRequest {
	return &enginev1.DepositRequest{
		Pubkey:                pd.PublicKey,
		WithdrawalCredentials: pd.WithdrawalCredentials,
		Amount:                pd.Amount,
		Signature:             pd.Signature,
		Index:                 index,
	}
}

func builderWithdrawalCredentials() [32]byte {
	var cred [32]byte
	cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
	copy(cred[12:], bytes.Repeat([]byte{0x22}, 20))
	return cred
}

func validatorWithdrawalCredentials() [32]byte {
	var cred [32]byte
	cred[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
	copy(cred[12:], bytes.Repeat([]byte{0x33}, 20))
	return cred
}

func toBytes48(b []byte) [48]byte {
	var out [48]byte
	copy(out[:], b)
	return out
}
beacon-chain/core/gloas/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package gloas

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/gloas")
beacon-chain/core/gloas/payload.go (new file, 345 lines)
@@ -0,0 +1,345 @@
package gloas

import (
	"bytes"
	"context"
	"fmt"

	requests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
)

// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
//
// <spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
// def process_execution_payload(
//     state: BeaconState,
//     # [Modified in Gloas:EIP7732]
//     # Removed `body`
//     # [New in Gloas:EIP7732]
//     signed_envelope: SignedExecutionPayloadEnvelope,
//     execution_engine: ExecutionEngine,
//     # [New in Gloas:EIP7732]
//     verify: bool = True,
// ) -> None:
//     envelope = signed_envelope.message
//     payload = envelope.payload
//
//     # Verify signature
//     if verify:
//         assert verify_execution_payload_envelope_signature(state, signed_envelope)
//
//     # Cache latest block header state root
//     previous_state_root = hash_tree_root(state)
//     if state.latest_block_header.state_root == Root():
//         state.latest_block_header.state_root = previous_state_root
//
//     # Verify consistency with the beacon block
//     assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
//     assert envelope.slot == state.slot
//
//     # Verify consistency with the committed bid
//     committed_bid = state.latest_execution_payload_bid
//     assert envelope.builder_index == committed_bid.builder_index
//     assert committed_bid.prev_randao == payload.prev_randao
//
//     # Verify consistency with expected withdrawals
//     assert hash_tree_root(payload.withdrawals) == hash_tree_root(state.payload_expected_withdrawals)
//
//     # Verify the gas_limit
//     assert committed_bid.gas_limit == payload.gas_limit
//     # Verify the block hash
//     assert committed_bid.block_hash == payload.block_hash
//     # Verify consistency of the parent hash with respect to the previous execution payload
//     assert payload.parent_hash == state.latest_block_hash
//     # Verify timestamp
//     assert payload.timestamp == compute_time_at_slot(state, state.slot)
//     # Verify the execution payload is valid
//     versioned_hashes = [
//         kzg_commitment_to_versioned_hash(commitment)
//         # [Modified in Gloas:EIP7732]
//         for commitment in committed_bid.blob_kzg_commitments
//     ]
//     requests = envelope.execution_requests
//     assert execution_engine.verify_and_notify_new_payload(
//         NewPayloadRequest(
//             execution_payload=payload,
//             versioned_hashes=versioned_hashes,
//             parent_beacon_block_root=state.latest_block_header.parent_root,
//             execution_requests=requests,
//         )
//     )
//
//     def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
//         for operation in operations:
//             fn(state, operation)
//
//     for_ops(requests.deposits, process_deposit_request)
//     for_ops(requests.withdrawals, process_withdrawal_request)
//     for_ops(requests.consolidations, process_consolidation_request)
//
//     # Queue the builder payment
//     payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
//     amount = payment.withdrawal.amount
//     if amount > 0:
//         state.builder_pending_withdrawals.append(payment.withdrawal)
//     state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = (
//         BuilderPendingPayment()
//     )
//
//     # Cache the execution payload hash
//     state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1
//     state.latest_block_hash = payload.block_hash
//
//     # Verify the state root
//     if verify:
//         assert envelope.state_root == hash_tree_root(state)
// </spec>
func ProcessExecutionPayload(
	ctx context.Context,
	st state.BeaconState,
	signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) error {
	if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
		return errors.Wrap(err, "signature verification failed")
	}

	latestHeader := st.LatestBlockHeader()
	if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
		previousStateRoot, err := st.HashTreeRoot(ctx)
		if err != nil {
			return errors.Wrap(err, "could not compute state root")
		}
		latestHeader.StateRoot = previousStateRoot[:]
		if err := st.SetLatestBlockHeader(latestHeader); err != nil {
			return errors.Wrap(err, "could not set latest block header")
		}
	}

	blockHeaderRoot, err := latestHeader.HashTreeRoot()
	if err != nil {
		return errors.Wrap(err, "could not compute block header root")
	}
	envelope, err := signedEnvelope.Envelope()
	if err != nil {
		return errors.Wrap(err, "could not get envelope from signed envelope")
	}

	beaconBlockRoot := envelope.BeaconBlockRoot()
	if !bytes.Equal(beaconBlockRoot[:], blockHeaderRoot[:]) {
		return errors.Errorf("envelope beacon block root does not match state latest block header root: envelope=%#x, header=%#x", beaconBlockRoot, blockHeaderRoot)
	}

	if envelope.Slot() != st.Slot() {
		return errors.Errorf("envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
	}

	latestBid, err := st.LatestExecutionPayloadBid()
	if err != nil {
		return errors.Wrap(err, "could not get latest execution payload bid")
	}
	if latestBid == nil {
		return errors.New("latest execution payload bid is nil")
	}
	if envelope.BuilderIndex() != latestBid.BuilderIndex() {
		return errors.Errorf("envelope builder index does not match committed bid builder index: envelope=%d, bid=%d", envelope.BuilderIndex(), latestBid.BuilderIndex())
	}

	payload, err := envelope.Execution()
	if err != nil {
		return errors.Wrap(err, "could not get execution payload from envelope")
	}
	latestBidPrevRandao := latestBid.PrevRandao()
	if !bytes.Equal(payload.PrevRandao(), latestBidPrevRandao[:]) {
		return errors.Errorf("payload prev randao does not match committed bid prev randao: payload=%#x, bid=%#x", payload.PrevRandao(), latestBidPrevRandao)
	}

	withdrawals, err := payload.Withdrawals()
	if err != nil {
		return errors.Wrap(err, "could not get withdrawals from payload")
	}

	ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
	if err != nil {
		return errors.Wrap(err, "could not validate payload withdrawals")
	}
	if !ok {
		return errors.New("payload withdrawals do not match expected withdrawals")
	}

	if latestBid.GasLimit() != payload.GasLimit() {
		return errors.Errorf("committed bid gas limit does not match payload gas limit: bid=%d, payload=%d", latestBid.GasLimit(), payload.GasLimit())
	}

	bidBlockHash := latestBid.BlockHash()
	payloadBlockHash := payload.BlockHash()
	if !bytes.Equal(bidBlockHash[:], payloadBlockHash) {
		return errors.Errorf("committed bid block hash does not match payload block hash: bid=%#x, payload=%#x", bidBlockHash, payloadBlockHash)
	}

	latestBlockHash, err := st.LatestBlockHash()
	if err != nil {
		return errors.Wrap(err, "could not get latest block hash")
	}
	if !bytes.Equal(payload.ParentHash(), latestBlockHash[:]) {
		return errors.Errorf("payload parent hash does not match state latest block hash: payload=%#x, state=%#x", payload.ParentHash(), latestBlockHash)
	}

	t, err := slots.StartTime(st.GenesisTime(), st.Slot())
	if err != nil {
		return errors.Wrap(err, "could not compute timestamp")
	}
	if payload.Timestamp() != uint64(t.Unix()) {
		return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
	}

	if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
		return errors.Wrap(err, "could not process execution requests")
	}

	if err := st.QueueBuilderPayment(); err != nil {
		return errors.Wrap(err, "could not queue builder payment")
	}

	if err := st.SetExecutionPayloadAvailability(st.Slot(), true); err != nil {
		return errors.Wrap(err, "could not set execution payload availability")
	}

	if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
		return errors.Wrap(err, "could not set latest block hash")
	}

	r, err := st.HashTreeRoot(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get hash tree root")
	}
	if r != envelope.StateRoot() {
		return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
	}

	return nil
}

func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
	if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
		return proposerPublicKey(st)
	}
	return builderPublicKey(st, builderIdx)
}

func proposerPublicKey(st state.BeaconState) (bls.PublicKey, error) {
	header := st.LatestBlockHeader()
	if header == nil {
		return nil, fmt.Errorf("latest block header is nil")
	}
	proposerPubkey := st.PubkeyAtIndex(header.ProposerIndex)
	publicKey, err := bls.PublicKeyFromBytes(proposerPubkey[:])
	if err != nil {
		return nil, fmt.Errorf("invalid proposer public key: %w", err)
	}
	return publicKey, nil
}

func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
	builder, err := st.Builder(builderIdx)
	if err != nil {
		return nil, fmt.Errorf("failed to get builder: %w", err)
	}
	if builder == nil {
		return nil, fmt.Errorf("builder at index %d not found", builderIdx)
	}
	publicKey, err := bls.PublicKeyFromBytes(builder.Pubkey)
	if err != nil {
		return nil, fmt.Errorf("invalid builder public key: %w", err)
	}
	return publicKey, nil
}

// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
//	for op in requests.deposits: process_deposit_request(state, op)
//	for op in requests.withdrawals: process_withdrawal_request(state, op)
//	for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
	if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
		return errors.Wrap(err, "could not process deposit requests")
	}

	var err error
	st, err = requests.ProcessWithdrawalRequests(ctx, st, rqs.Withdrawals)
	if err != nil {
		return errors.Wrap(err, "could not process withdrawal requests")
	}
	err = requests.ProcessConsolidationRequests(ctx, st, rqs.Consolidations)
	if err != nil {
		return errors.Wrap(err, "could not process consolidation requests")
	}
	return nil
}

// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
// Spec v1.7.0-alpha.0 (pseudocode):
//	builder_index = signed_envelope.message.builder_index
//	if builder_index == BUILDER_INDEX_SELF_BUILD:
//	    validator_index = state.latest_block_header.proposer_index
//	    pubkey = state.validators[validator_index].pubkey
//	else:
//	    pubkey = state.builders[builder_index].pubkey
//	signing_root = compute_signing_root(
//	    signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
//	)
//	return bls.Verify(pubkey, signing_root, signed_envelope.signature)
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
	envelope, err := signedEnvelope.Envelope()
	if err != nil {
		return fmt.Errorf("failed to get envelope: %w", err)
	}

	builderIdx := envelope.BuilderIndex()
	publicKey, err := envelopePublicKey(st, builderIdx)
	if err != nil {
		return err
	}

	signatureBytes := signedEnvelope.Signature()
	signature, err := bls.SignatureFromBytes(signatureBytes[:])
	if err != nil {
		return fmt.Errorf("invalid signature format: %w", err)
	}

	currentEpoch := slots.ToEpoch(envelope.Slot())
	domain, err := signing.Domain(
		st.Fork(),
		currentEpoch,
		params.BeaconConfig().DomainBeaconBuilder,
		st.GenesisValidatorsRoot(),
	)
	if err != nil {
		return fmt.Errorf("failed to compute signing domain: %w", err)
	}

	signingRoot, err := signedEnvelope.SigningRoot(domain)
	if err != nil {
		return fmt.Errorf("failed to compute signing root: %w", err)
	}

	if !signature.Verify(publicKey, signingRoot[:]) {
		return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
	}

	return nil
}
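One detail of the builder-payment queueing above that is easy to miss: the pending payment for the current slot lives at index `SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH` of the two-epoch payments vector. A tiny self-contained sketch; SlotsPerEpoch = 32 (the mainnet preset) is assumed here for illustration:

```go
package main

import "fmt"

const slotsPerEpoch = uint64(32) // mainnet preset, assumed for the example

// builderPaymentIndex returns the position of the current slot's pending
// builder payment inside the 2*SLOTS_PER_EPOCH payments vector.
func builderPaymentIndex(slot uint64) uint64 {
	return slotsPerEpoch + slot%slotsPerEpoch
}

func main() {
	fmt.Println(builderPaymentIndex(5))  // 37
	fmt.Println(builderPaymentIndex(37)) // 37: same position one epoch later
}
```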
@@ -77,7 +77,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body

// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
	committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
	committee, err := payloadCommittee(ctx, st, att.Data.Slot)
	if err != nil {
		return nil, err
	}
@@ -96,7 +96,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
	}, nil
}

// PayloadCommittee returns the payload timeliness committee for a given slot for the state.
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
//
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
@@ -115,7 +115,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
//     state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
	epoch := slots.ToEpoch(slot)
	seed, err := ptcSeed(st, epoch, slot)
	if err != nil {
beacon-chain/core/gloas/payload_test.go (new file, 349 lines)
@@ -0,0 +1,349 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type payloadFixture struct {
|
||||
state state.BeaconState
|
||||
signed interfaces.ROSignedExecutionPayloadEnvelope
|
||||
signedProto *ethpb.SignedExecutionPayloadEnvelope
|
||||
envelope *ethpb.ExecutionPayloadEnvelope
|
||||
payload *enginev1.ExecutionPayloadDeneb
|
||||
slot primitives.Slot
|
||||
}
|
||||
|
||||
func buildPayloadFixture(t *testing.T, mutate func(payload *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, envelope *ethpb.ExecutionPayloadEnvelope)) payloadFixture {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
slot := primitives.Slot(5)
|
||||
builderIdx := primitives.BuilderIndex(0)
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pk := sk.PublicKey().Marshal()
|
||||
|
||||
randao := bytes.Repeat([]byte{0xAA}, 32)
|
||||
parentHash := bytes.Repeat([]byte{0xBB}, 32)
|
||||
blockHash := bytes.Repeat([]byte{0xCC}, 32)
|
||||
|
||||
withdrawals := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 0},
|
||||
}
|
||||
|
||||
payload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
|
||||
StateRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
ReceiptsRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
LogsBloom: bytes.Repeat([]byte{0x04}, 256),
|
||||
PrevRandao: randao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 0,
|
||||
Timestamp: 100,
|
||||
ExtraData: []byte{},
|
||||
BaseFeePerGas: bytes.Repeat([]byte{0x05}, 32),
|
||||
BlockHash: blockHash,
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: withdrawals,
|
||||
BlobGasUsed: 0,
|
||||
ExcessBlobGas: 0,
|
||||
}
|
||||
|
||||
bid := &ethpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: parentHash,
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xDD}, 32),
|
||||
BlockHash: blockHash,
|
||||
PrevRandao: randao,
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
FeeRecipient: bytes.Repeat([]byte{0xEE}, 20),
|
||||
}
|
||||
|
||||
header := &ethpb.BeaconBlockHeader{
|
||||
Slot: slot,
|
||||
ParentRoot: bytes.Repeat([]byte{0x11}, 32),
|
||||
StateRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BodyRoot: bytes.Repeat([]byte{0x33}, 32),
|
||||
}
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
envelope := &ethpb.ExecutionPayloadEnvelope{
|
||||
Slot: slot,
|
||||
BuilderIndex: builderIdx,
|
||||
BeaconBlockRoot: headerRoot[:],
|
||||
Payload: payload,
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
}
|
||||
|
||||
if mutate != nil {
|
||||
mutate(payload, bid, envelope)
|
||||
}
|
||||
|
||||
genesisRoot := bytes.Repeat([]byte{0xAB}, 32)
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = bytes.Repeat([]byte{0x44}, 32)
|
||||
stateRoots[i] = bytes.Repeat([]byte{0x55}, 32)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = randao
|
||||
}
|
||||
|
||||
withdrawalCreds := make([]byte, 32)
|
||||
withdrawalCreds[0] = cfg.ETH1AddressWithdrawalPrefixByte
|
||||
|
||||
eth1Data := &ethpb.Eth1Data{
|
||||
DepositRoot: bytes.Repeat([]byte{0x66}, 32),
|
||||
DepositCount: 0,
|
||||
BlockHash: bytes.Repeat([]byte{0x77}, 32),
|
||||
}
|
||||
|
||||
vals := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: pk,
|
||||
WithdrawalCredentials: withdrawalCreds,
|
||||
EffectiveBalance: cfg.MinActivationBalance + 1_000,
|
||||
},
|
||||
}
|
||||
balances := []uint64{cfg.MinActivationBalance + 1_000}
|
||||
|
||||
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
|
||||
for i := range payments {
|
||||
payments[i] = &ethpb.BuilderPendingPayment{
|
||||
Withdrawal: &ethpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
executionPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
|
||||
|
||||
builders := make([]*ethpb.Builder, builderIdx+1)
|
||||
builders[builderIdx] = &ethpb.Builder{
|
||||
Pubkey: pk,
|
||||
Version: []byte{0},
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x09}, 20),
|
||||
Balance: 0,
|
||||
DepositEpoch: 0,
|
||||
WithdrawableEpoch: 0,
|
||||
}
|
||||
|
||||
genesisTime := uint64(0)
|
||||
slotSeconds := cfg.SecondsPerSlot * uint64(slot)
|
||||
if payload.Timestamp > slotSeconds {
|
||||
genesisTime = payload.Timestamp - slotSeconds
|
||||
}
|
||||
|
||||
stProto := &ethpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
GenesisTime: genesisTime,
|
||||
GenesisValidatorsRoot: genesisRoot,
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
LatestBlockHeader: header,
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
Eth1Data: eth1Data,
|
||||
Validators: vals,
|
||||
Balances: balances,
|
||||
LatestBlockHash: payload.ParentHash,
|
||||
LatestExecutionPayloadBid: bid,
|
||||
BuilderPendingPayments: payments,
|
||||
ExecutionPayloadAvailability: executionPayloadAvailability,
|
||||
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
PayloadExpectedWithdrawals: payload.Withdrawals,
|
||||
Builders: builders,
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := st.Copy()
|
||||
ctx := context.Background()
|
||||
require.NoError(t, processExecutionRequests(ctx, expected, envelope.ExecutionRequests))
|
||||
require.NoError(t, expected.QueueBuilderPayment())
|
||||
require.NoError(t, expected.SetExecutionPayloadAvailability(slot, true))
|
||||
var blockHashArr [32]byte
|
||||
copy(blockHashArr[:], payload.BlockHash)
|
||||
require.NoError(t, expected.SetLatestBlockHash(blockHashArr))
|
||||
expectedRoot, err := expected.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
envelope.StateRoot = expectedRoot[:]
|
||||
|
||||
epoch := slots.ToEpoch(slot)
|
||||
domain, err := signing.Domain(st.Fork(), epoch, cfg.DomainBeaconBuilder, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(envelope, domain)
|
||||
require.NoError(t, err)
|
||||
signature := sk.Sign(signingRoot[:]).Marshal()
|
||||
|
||||
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
|
||||
Message: envelope,
|
||||
Signature: signature,
|
||||
}
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
return payloadFixture{
|
||||
state: st,
|
||||
signed: signed,
|
||||
signedProto: signedProto,
|
||||
envelope: envelope,
|
||||
payload: payload,
|
||||
slot: slot,
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayload_Success(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
require.NoError(t, ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed))
|
||||
|
||||
latestHash, err := fixture.state.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
var expectedHash [32]byte
|
||||
copy(expectedHash[:], fixture.payload.BlockHash)
|
||||
require.Equal(t, expectedHash, latestHash)
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
|
||||
payments, err := fixture.state.BuilderPendingPayments()
|
||||
require.NoError(t, err)
|
||||
payment := payments[paymentIndex]
|
||||
require.NotNil(t, payment)
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
|
||||
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
|
||||
})
|
||||
|
||||
err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
|
||||
require.ErrorContains(t, "prev randao", err)
|
||||
}
|
||||
|
||||
func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
require.NoError(t, fixture.state.QueueBuilderPayment())
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
|
||||
payments, err := fixture.state.BuilderPendingPayments()
|
||||
require.NoError(t, err)
|
||||
payment := payments[paymentIndex]
|
||||
require.NotNil(t, payment)
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
t.Run("self build", func(t *testing.T) {
|
||||
proposerSk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
proposerPk := proposerSk.PublicKey().Marshal()
|
||||
|
||||
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, true, ok)
|
||||
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
|
||||
stPb.Validators[0].PublicKey = proposerPk
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
|
||||
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
|
||||
|
||||
epoch := slots.ToEpoch(msg.Slot)
|
||||
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(msg, domain)
|
||||
require.NoError(t, err)
|
||||
signature := proposerSk.Sign(signingRoot[:]).Marshal()
|
||||
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: msg,
|
||||
Signature: signature,
|
||||
}
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
|
||||
})
|
||||
|
||||
t.Run("builder", func(t *testing.T) {
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
|
||||
})
|
||||
|
||||
t.Run("invalid signature", func(t *testing.T) {
|
||||
t.Run("self build", func(t *testing.T) {
|
||||
proposerSk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
proposerPk := proposerSk.PublicKey().Marshal()
|
||||
|
||||
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, true, ok)
|
||||
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
|
||||
stPb.Validators[0].PublicKey = proposerPk
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
|
||||
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
|
||||
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: msg,
|
||||
Signature: bytes.Repeat([]byte{0xFF}, 96),
|
||||
}
|
||||
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
|
||||
require.ErrorContains(t, "invalid signature format", err)
|
||||
})
|
||||
|
||||
t.Run("builder", func(t *testing.T) {
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: fixture.signedProto.Message,
|
||||
Signature: bytes.Repeat([]byte{0xFF}, 96),
|
||||
}
|
||||
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
|
||||
require.ErrorContains(t, "invalid signature format", err)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -20,7 +20,6 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -6,7 +6,6 @@ go_library(
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"forkchoice.go",
|
||||
"gloas.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"node.go",
|
||||
@@ -33,7 +32,6 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
@@ -52,7 +50,6 @@ go_test(
|
||||
srcs = [
|
||||
"ffg_update_test.go",
|
||||
"forkchoice_test.go",
|
||||
"gloas_test.go",
|
||||
"no_vote_test.go",
|
||||
"node_test.go",
|
||||
"on_tick_test.go",
|
||||
@@ -72,7 +69,6 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -31,8 +31,7 @@ func New() *ForkChoice {
|
||||
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
|
||||
fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[primitives.ValidatorIndex]bool),
|
||||
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
|
||||
}
|
||||
@@ -44,7 +43,7 @@ func New() *ForkChoice {
|
||||
|
||||
// NodeCount returns the current number of nodes in the Store.
|
||||
func (f *ForkChoice) NodeCount() int {
|
||||
return len(f.store.emptyNodeByRoot)
|
||||
return len(f.store.nodeByRoot)
|
||||
}
|
||||
|
||||
// Head returns the head root from fork choice store.
|
||||
@@ -65,14 +64,14 @@ func (f *ForkChoice) Head(
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
|
||||
}
|
||||
|
||||
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
|
||||
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
|
||||
}
|
||||
|
||||
jc := f.JustifiedCheckpoint()
|
||||
fc := f.FinalizedCheckpoint()
|
||||
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
|
||||
if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
|
||||
}
|
||||
return f.store.head(ctx)
|
||||
@@ -119,14 +118,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jc, fc = f.store.pullTips(state, pn.node, jc, fc)
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
|
||||
_, remErr := f.store.removeNode(ctx, pn)
|
||||
_, remErr := f.store.removeNode(ctx, node)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("Could not remove node")
|
||||
}
|
||||
@@ -149,63 +148,49 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
|
||||
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
|
||||
return nil
|
||||
}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
Epoch: fc.Epoch,
|
||||
Root: bytesutil.ToBytes32(fc.Root),
|
||||
}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
|
||||
Root: bytesutil.ToBytes32(fc.Root)}
|
||||
return f.store.prune(ctx)
|
||||
}
|
||||
|
||||
// HasNode returns true if the node exists in fork choice store,
|
||||
// false else wise.
|
||||
func (f *ForkChoice) HasNode(root [32]byte) bool {
|
||||
_, ok := f.store.emptyNodeByRoot[root]
|
||||
_, ok := f.store.nodeByRoot[root]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsCanonical returns true if the given root is part of the canonical chain.
|
||||
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
|
||||
// It is fine to pick empty node here since we only check if the beacon block is canonical.
|
||||
pn, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || pn == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if pn.node.bestDescendant == nil {
|
||||
// The node doesn't have any children
|
||||
if node.bestDescendant == nil {
|
||||
if f.store.headNode.bestDescendant == nil {
|
||||
// headNode is itself head.
|
||||
return pn.node == f.store.headNode
|
||||
return node == f.store.headNode
|
||||
}
|
||||
// headNode is not actualized and there are some descendants
|
||||
return pn.node == f.store.headNode.bestDescendant
|
||||
return node == f.store.headNode.bestDescendant
|
||||
}
|
||||
// The node has children
|
||||
if f.store.headNode.bestDescendant == nil {
|
||||
return pn.node.bestDescendant == f.store.headNode
|
||||
return node.bestDescendant == f.store.headNode
|
||||
}
|
||||
return pn.node.bestDescendant == f.store.headNode.bestDescendant
|
||||
return node.bestDescendant == f.store.headNode.bestDescendant
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if the given root has been optimistically synced.
|
||||
// TODO: Gloas, the current implementation uses the result of the full block for
|
||||
// the given root. In gloas this would be incorrect and we should specify the
|
||||
// payload content, thus we should expose a full/empty version of this call.
|
||||
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
if f.store.allTipsAreInvalid {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
en, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return true, ErrNilNode
|
||||
}
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
if fn != nil {
|
||||
return fn.optimistic, nil
|
||||
}
|
||||
|
||||
return en.optimistic, nil
|
||||
return node.optimistic, nil
|
||||
}
|
||||
|
||||
// AncestorRoot returns the ancestor root of input block root at a given slot.
|
||||
@@ -213,21 +198,17 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
pn, ok := f.store.emptyNodeByRoot[root]
|
||||
if !ok || pn == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
}
|
||||
|
||||
n := pn.node
|
||||
for n.slot > slot {
|
||||
n := node
|
||||
for n != nil && n.slot > slot {
|
||||
if ctx.Err() != nil {
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
if n.parent == nil {
|
||||
n = nil
|
||||
break
|
||||
}
|
||||
n = n.parent.node
|
||||
n = n.parent
|
||||
}
|
||||
|
||||
if n == nil {
|
||||
@@ -240,11 +221,10 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
|
||||
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
|
||||
// known chain in forkchoice.
|
||||
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
|
||||
pn, ok := f.store.emptyNodeByRoot[cp.Root]
|
||||
if !ok || pn == nil {
|
||||
node, ok := f.store.nodeByRoot[cp.Root]
|
||||
if !ok || node == nil {
|
||||
return false, nil
|
||||
}
|
||||
node := pn.node
|
||||
epochStart, err := slots.EpochStart(cp.Epoch)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -253,13 +233,10 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If it's the start of the epoch, it is a checkpoint
|
||||
if node.slot == epochStart {
|
||||
if len(node.children) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
// If there are no descendants of this beacon block, it is is viable as a checkpoint
|
||||
children := f.store.allConsensusChildren(node)
|
||||
if len(children) == 0 {
|
||||
if node.slot == epochStart {
|
||||
return true, nil
|
||||
}
|
||||
if !features.Get().IgnoreUnviableAttestations {
|
||||
@@ -269,8 +246,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
// If some child is after the start of the epoch, the checkpoint is viable.
|
||||
for _, child := range children {
|
||||
for _, child := range node.children {
|
||||
if child.slot > epochStart {
|
||||
return true, nil
|
||||
}
|
||||
@@ -311,7 +287,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
|
||||
// Ignore the vote if the root is not in fork choice
|
||||
// store, that means we have not seen the block before.
|
||||
nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
|
||||
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
|
||||
if ok && vote.nextRoot != zHash {
|
||||
// Protection against nil node
|
||||
if nextNode == nil {
|
||||
@@ -320,7 +296,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
nextNode.balance += newBalance
|
||||
}
|
||||
|
||||
currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
|
||||
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
|
||||
if ok && vote.currentRoot != zHash {
|
||||
// Protection against nil node
|
||||
if currentNode == nil {
|
||||
@@ -361,13 +337,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
|
||||
return f.store.proposerBoost()
|
||||
}
|
||||
|
||||
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
|
||||
// SetOptimisticToValid sets the node with the given root as a fully validated node
|
||||
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
|
||||
fn, ok := f.store.fullNodeByRoot[root]
|
||||
if !ok || fn == nil {
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set node to valid")
|
||||
}
|
||||
return f.store.setNodeAndParentValidated(ctx, fn)
|
||||
return node.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpoint of fork choice store.
|
||||
@@ -386,8 +362,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
|
||||
}
|
||||
|
||||
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
|
||||
}
|
||||
|
||||
// InsertSlashedIndex adds the given slashed validator index to the
|
||||
@@ -410,7 +386,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
|
||||
return
|
||||
}
|
||||
|
||||
node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
|
||||
node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
|
||||
if !ok || node == nil {
|
||||
return
|
||||
}
|
||||
@@ -445,30 +421,22 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
|
||||
}
|
||||
|
||||
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
|
||||
// This is payload aware. Consider the following situation
|
||||
// [A,full] <--- [B, full] <---[C,pending]
|
||||
//
|
||||
// \---------[B, empty] <--[D, pending]
|
||||
//
|
||||
// Then even though C and D both descend from the beacon block B, their common ancestor is A.
|
||||
// Notice that also this function **requires** that the two roots are actually contending blocks! otherwise the
|
||||
// behavior is not defined.
|
||||
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
en1, ok := f.store.emptyNodeByRoot[r1]
|
||||
if !ok || en1 == nil {
|
||||
n1, ok := f.store.nodeByRoot[r1]
|
||||
if !ok || n1 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
|
||||
// Do nothing if the input roots are the same.
|
||||
if r1 == r2 {
|
||||
return r1, en1.node.slot, nil
|
||||
return r1, n1.slot, nil
|
||||
}
|
||||
|
||||
en2, ok := f.store.emptyNodeByRoot[r2]
|
||||
if !ok || en2 == nil {
|
||||
n2, ok := f.store.nodeByRoot[r2]
|
||||
if !ok || n2 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
|
||||
@@ -476,23 +444,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
|
||||
if ctx.Err() != nil {
|
||||
return [32]byte{}, 0, ctx.Err()
|
||||
}
|
||||
if en1.node.slot > en2.node.slot {
|
||||
en1 = en1.node.parent
|
||||
if n1.slot > n2.slot {
|
||||
n1 = n1.parent
|
||||
// Reaches the end of the tree and unable to find common ancestor.
|
||||
// This should not happen at runtime as the finalized
|
||||
// node has to be a common ancestor
|
||||
if en1 == nil {
|
||||
if n1 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
} else {
|
||||
en2 = en2.node.parent
|
||||
n2 = n2.parent
|
||||
// Reaches the end of the tree and unable to find common ancestor.
|
||||
if en2 == nil {
|
||||
if n2 == nil {
|
||||
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
|
||||
}
|
||||
}
|
||||
if en1 == en2 {
|
||||
return en1.node.root, en1.node.slot, nil
|
||||
if n1 == n2 {
|
||||
return n1.root, n1.slot, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -539,17 +507,35 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
|
||||
|
||||
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
|
||||
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
|
||||
return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
|
||||
root := f.FinalizedCheckpoint().Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
|
||||
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
|
||||
return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
|
||||
root := f.JustifiedCheckpoint().Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
|
||||
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
|
||||
root := f.store.unrealizedJustifiedCheckpoint.Root
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return node.payloadHash
|
||||
}
|
||||
|
||||
// ForkChoiceDump returns a full dump of forkchoice.
|
||||
@@ -573,7 +559,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
|
||||
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
|
||||
var err error
|
||||
if f.store.treeRootNode != nil {
|
||||
nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
|
||||
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -602,7 +588,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
|
||||
|
||||
// Weight returns the weight of the given root if found on the store
|
||||
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
@@ -630,11 +616,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
|
||||
|
||||
// Slot returns the slot of the given root if it's known to forkchoice
|
||||
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return 0, ErrNilNode
|
||||
}
|
||||
return n.node.slot, nil
|
||||
return n.slot, nil
|
||||
}
|
||||
|
||||
// DependentRoot returns the last root of the epoch prior to the requested ecoch in the canonical chain.
|
||||
@@ -642,7 +628,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
|
||||
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
|
||||
}
|
||||
|
||||
// DependentRootForEpoch return the last root of the epoch prior to the requested epoch for the given root.
|
||||
// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
|
||||
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
tr, err := f.TargetRootForEpoch(root, epoch)
|
||||
if err != nil {
|
||||
@@ -651,18 +637,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
if tr == [32]byte{} {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
en, ok := f.store.emptyNodeByRoot[tr]
|
||||
if !ok || en == nil {
|
||||
node, ok := f.store.nodeByRoot[tr]
|
||||
if !ok || node == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
if slots.ToEpoch(en.node.slot) >= epoch {
|
||||
if en.node.parent != nil {
|
||||
en = en.node.parent
|
||||
if slots.ToEpoch(node.slot) >= epoch {
|
||||
if node.parent != nil {
|
||||
node = node.parent
|
||||
} else {
|
||||
return f.store.finalizedDependentRoot, nil
|
||||
}
|
||||
}
|
||||
return en.node.root, nil
|
||||
return node.root, nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch returns the root of the target block for a given epoch.
|
||||
@@ -674,48 +660,46 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
|
||||
// which case we return the root of the checkpoint of the chain containing the
|
||||
// passed root, at the given epoch
|
||||
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
node := n.node
|
||||
nodeEpoch := slots.ToEpoch(node.slot)
|
||||
nodeEpoch := slots.ToEpoch(n.slot)
|
||||
if epoch > nodeEpoch {
|
||||
return node.root, nil
|
||||
return n.root, nil
|
||||
}
|
||||
if node.target == nil {
|
||||
if n.target == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
targetRoot := node.target.root
|
||||
targetRoot := n.target.root
|
||||
if epoch == nodeEpoch {
|
||||
return targetRoot, nil
|
||||
}
|
||||
targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
|
||||
targetNode, ok := f.store.nodeByRoot[targetRoot]
|
||||
if !ok || targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// If slot 0 was not missed we consider a previous block to go back at least one epoch
|
||||
if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
|
||||
targetNode = targetNode.node.parent
|
||||
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
|
||||
targetNode = targetNode.parent
|
||||
if targetNode == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
}
|
||||
return f.TargetRootForEpoch(targetNode.node.root, epoch)
|
||||
return f.TargetRootForEpoch(targetNode.root, epoch)
|
||||
}
|
||||
|
||||
// ParentRoot returns the block root of the parent node if it is in forkchoice.
|
||||
// The exception is for the finalized checkpoint root which we return the zero
|
||||
// hash.
|
||||
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
if !ok || n == nil {
|
||||
return [32]byte{}, ErrNilNode
|
||||
}
|
||||
// Return the zero hash for the tree root
|
||||
parent := n.node.parent
|
||||
if parent == nil {
|
||||
if n.parent == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
return parent.node.root, nil
|
||||
return n.parent.root, nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package doublylinkedtree
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice"
|
||||
@@ -103,9 +104,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
s := f.store
|
||||
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
@@ -121,9 +122,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{100, 100, 100}
|
||||
f.votes = []Vote{
|
||||
@@ -134,9 +135,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
@@ -152,9 +153,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{125, 125, 125}
|
||||
f.votes = []Vote{
|
||||
@@ -165,9 +166,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
|
||||
f.justifiedBalances = []uint64{10, 20, 30}
|
||||
require.NoError(t, f.updateBalances())
|
||||
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
@@ -223,12 +224,12 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
|
||||
require.NoError(t, f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode))
|
||||
require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].node.weight)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].node.weight)
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
|
||||
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
|
||||
|
||||
require.NoError(t, f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, 1, 1, 1))
|
||||
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
|
||||
|
||||
r1 := [32]byte{'1'}
|
||||
@@ -259,7 +260,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)].node
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
|
||||
@@ -341,21 +342,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
|
||||
// Process b's slashing, c is now head
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
f.justifiedBalances = []uint64{100, 200, 200, 300}
|
||||
head, err = f.Head(ctx)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
// Process b's slashing again, should be a noop
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
f.justifiedBalances = []uint64{100, 200, 200, 300}
|
||||
head, err = f.Head(ctx)
|
||||
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
@@ -513,6 +514,58 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// a -- b -- c -- d
|
||||
f = setup(0, 0)
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
tests = []struct {
|
||||
name string
|
||||
r1 [32]byte
|
||||
r2 [32]byte
|
||||
wantRoot [32]byte
|
||||
wantSlot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Common ancestor between a and b is a",
|
||||
r1: [32]byte{'a'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
wantSlot: 0,
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between b and d is b",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'b'},
|
||||
wantSlot: 1,
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between d and a is a",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'a'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
wantSlot: 0,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wantRoot, gotRoot)
|
||||
require.Equal(t, tc.wantSlot, gotSlot)
|
||||
})
|
||||
}
|
||||
|
||||
// Equal inputs should return the same root.
|
||||
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
@@ -535,9 +588,10 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
unrealizedJustifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
unrealizedFinalizedEpoch: 1,
|
||||
optimistic: true,
|
||||
}
|
||||
|
||||
f.store.emptyNodeByRoot[[32]byte{'y'}] = &PayloadNode{node: n, optimistic: true}
|
||||
f.store.nodeByRoot[[32]byte{'y'}] = n
|
||||
// broken link
|
||||
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
@@ -556,8 +610,7 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
|
||||
Block: roblock,
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
@@ -572,8 +625,7 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
|
||||
Block: roblock,
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
@@ -690,7 +742,7 @@ func TestWeight(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
n, ok := f.store.emptyNodeByRoot[root]
|
||||
n, ok := f.store.nodeByRoot[root]
|
||||
require.Equal(t, true, ok)
|
||||
n.weight = 10
|
||||
w, err := f.Weight(root)
|
||||
@@ -862,3 +914,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, zeroHash, root)
|
||||
}
|
||||
|
||||
func TestForkChoice_CleanupInserting(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
|
||||
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
|
||||
return f.justifiedBalances, errors.New("mock err")
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, f.InsertNode(ctx, st, roblock))
|
||||
require.Equal(t, false, f.HasNode(roblock.Root()))
|
||||
}
|
||||
|
||||
@@ -1,327 +0,0 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
|
||||
sb, err := block.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to wrap signed bid")
|
||||
}
|
||||
bid, err := wb.Bid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get bid from wrapped bid")
|
||||
}
|
||||
*blockHash = bid.BlockHash()
|
||||
parentRoot := block.ParentRoot()
|
||||
*parent = s.emptyNodeByRoot[parentRoot]
|
||||
if *parent == nil {
|
||||
// This is the tree root node.
|
||||
return nil
|
||||
}
|
||||
if bid.ParentBlockHash() == (*parent).node.blockHash {
|
||||
// block builds on full
|
||||
*parent = s.fullNodeByRoot[(*parent).node.root]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight := en.weight
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += fn.weight
|
||||
}
|
||||
if n.root == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += child.weight
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
|
||||
|
||||
// allConsensusChildren returns the list of all consensus blocks that build on the given node.
|
||||
func (s *Store) allConsensusChildren(n *Node) []*Node {
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
fn, ok := s.fullNodeByRoot[n.root]
|
||||
if ok {
|
||||
return append(slices.Clone(en.children), fn.children...)
|
||||
}
|
||||
return en.children
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
|
||||
func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if !pn.optimistic {
|
||||
return nil
|
||||
}
|
||||
pn.optimistic = false
|
||||
if pn.full {
|
||||
// set the empty node also a as valid
|
||||
en := s.emptyNodeByRoot[pn.node.root]
|
||||
en.optimistic = false
|
||||
}
|
||||
if pn.node.parent == nil {
|
||||
return nil
|
||||
}
|
||||
return s.setNodeAndParentValidated(ctx, pn.node.parent)
|
||||
}
|
||||
|
||||
// fullAncestor returns the highest ancestor with a full payload that a block with the
|
||||
// given root has. If there is a payload for the past root, then it will return that full
|
||||
// node. Otherwise it will use the full parent actually being an ancestor of the given root
|
||||
func (s *Store) fullAncestor(root [32]byte) *PayloadNode {
|
||||
fn, ok := s.fullNodeByRoot[root]
|
||||
if ok {
|
||||
return fn
|
||||
}
|
||||
en := s.emptyNodeByRoot[root]
|
||||
if en == nil {
|
||||
return nil
|
||||
}
|
||||
return s.fullParent(en)
|
||||
}
|
||||
|
||||
// fullParent returns the latest full node that this block builds on.
|
||||
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
|
||||
parent := pn.node.parent
|
||||
for ; parent != nil && !parent.full; parent = parent.node.parent {
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
// parentHash return the payload hash of the latest full node that this block builds on.
|
||||
func (s *Store) parentHash(pn *PayloadNode) [32]byte {
|
||||
fullParent := s.fullParent(pn)
|
||||
if fullParent == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return fullParent.node.blockHash
|
||||
}
|
||||
|
||||
// latestHashForRoot returns the latest payload hash for the given block root.
|
||||
func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
|
||||
// try to get the full node first
|
||||
fn, ok := s.fullNodeByRoot[root]
|
||||
if ok && fn != nil {
|
||||
return fn.node.blockHash
|
||||
}
|
||||
en := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
// This should not happen
|
||||
return [32]byte{}
|
||||
}
|
||||
return s.parentHash(en)
|
||||
}
|
||||
|
||||
// updateBestDescendantPayloadNode updates the best descendant of this node and its
|
||||
// children.
|
||||
func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
var bestChild *Node
|
||||
bestWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && bestChild == nil {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
bestWeight = child.weight
|
||||
bestChild = child
|
||||
} else if childLeadsToViableHead {
|
||||
// If both are viable, compare their weights.
|
||||
if child.weight == bestWeight {
|
||||
// Tie-breaker of equal weights by root.
|
||||
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
|
||||
bestChild = child
|
||||
}
|
||||
} else if child.weight > bestWeight {
|
||||
bestChild = child
|
||||
bestWeight = child.weight
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestChild == nil {
|
||||
n.bestDescendant = nil
|
||||
} else {
|
||||
if bestChild.bestDescendant == nil {
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBestDescendantConsensusNode updates the best descendant of this node and its
|
||||
// children.
|
||||
func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(s.allConsensusChildren(n)) == 0 {
|
||||
n.bestDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn == nil {
|
||||
n.bestDescendant = en.bestDescendant
|
||||
return nil
|
||||
}
|
||||
// TODO GLOAS: pick between full or empty
|
||||
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
n.bestDescendant = fn.bestDescendant
|
||||
return nil
|
||||
}
|
||||
|
||||
// choosePayloadContent chooses between empty or full for the passed consensus node. TODO Gloas: use PTC to choose.
|
||||
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
return fn
|
||||
}
|
||||
return s.emptyNodeByRoot[n.root]
|
||||
}
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
var parentRoot [32]byte
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.node.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
optimistic := false
|
||||
if n.parent != nil {
|
||||
optimistic = n.parent.optimistic
|
||||
}
|
||||
en := s.emptyNodeByRoot[n.root]
|
||||
timestamp := en.timestamp
|
||||
fn := s.fullNodeByRoot[n.root]
|
||||
if fn != nil {
|
||||
optimistic = fn.optimistic
|
||||
timestamp = fn.timestamp
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: optimistic,
|
||||
ExecutionBlockHash: n.blockHash[:],
|
||||
Timestamp: timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
} else {
|
||||
thisNode.Validity = forkchoice2.Valid
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
children := s.allConsensusChildren(n)
|
||||
for _, child := range children {
|
||||
nodes, err = s.nodeTreeDump(ctx, child, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (f *ForkChoice) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
|
||||
s := f.store
|
||||
root := pe.BeaconBlockRoot()
|
||||
en := s.emptyNodeByRoot[root]
|
||||
if en == nil {
|
||||
return errors.Wrap(ErrNilNode, "cannot insert full node without an empty one")
|
||||
}
|
||||
if _, ok := s.fullNodeByRoot[root]; ok {
|
||||
// We don't import two payloads for the same root
|
||||
return nil
|
||||
}
|
||||
fn := &PayloadNode{
|
||||
node: en.node,
|
||||
optimistic: true,
|
||||
timestamp: time.Now(),
|
||||
full: true,
|
||||
children: make([]*Node, 0),
|
||||
}
|
||||
s.fullNodeByRoot[root] = fn
|
||||
return nil
|
||||
}
|
||||
@@ -1,300 +0,0 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
|
||||
|
||||
func prepareGloasForkchoiceState(
|
||||
_ context.Context,
|
||||
slot primitives.Slot,
|
||||
blockRoot [32]byte,
|
||||
parentRoot [32]byte,
|
||||
blockHash [32]byte,
|
||||
parentBlockHash [32]byte,
|
||||
justifiedEpoch primitives.Epoch,
|
||||
finalizedEpoch primitives.Epoch,
|
||||
) (state.BeaconState, blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
|
||||
justifiedCheckpoint := ðpb.Checkpoint{
|
||||
Epoch: justifiedEpoch,
|
||||
}
|
||||
|
||||
finalizedCheckpoint := ðpb.Checkpoint{
|
||||
Epoch: finalizedEpoch,
|
||||
}
|
||||
|
||||
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
|
||||
for i := range builderPendingPayments {
|
||||
builderPendingPayments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
base := ðpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentJustifiedCheckpoint: justifiedCheckpoint,
|
||||
FinalizedCheckpoint: finalizedCheckpoint,
|
||||
LatestBlockHeader: blockHeader,
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: blockHash[:],
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitments: [][]byte{make([]byte, 48)},
|
||||
},
|
||||
Builders: make([]*ethpb.Builder, 0),
|
||||
BuilderPendingPayments: builderPendingPayments,
|
||||
ExecutionPayloadAvailability: make([]byte, 1024),
|
||||
LatestBlockHash: make([]byte, 32),
|
||||
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
ProposerLookahead: make([]uint64, 64),
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: blockHash[:],
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
},
|
||||
})
|
||||
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
SignedExecutionPayloadBid: bid,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
|
||||
|
||||
func prepareGloasForkchoicePayload(
|
||||
blockRoot [32]byte,
|
||||
) (interfaces.ROExecutionPayloadEnvelope, error) {
|
||||
env := ðpb.ExecutionPayloadEnvelope{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Payload: &enginev1.ExecutionPayloadDeneb{},
|
||||
}
|
||||
return blocks.WrappedROExecutionPayloadEnvelope(env)
|
||||
}
|
||||
|
||||
func TestInsertGloasBlock_EmptyNodeOnly(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(1)
|
||||
blockHash := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Empty node should exist.
|
||||
en := f.store.emptyNodeByRoot[root]
|
||||
require.NotNil(t, en)
|
||||
|
||||
// Full node should NOT exist.
|
||||
_, hasFull := f.store.fullNodeByRoot[root]
|
||||
assert.Equal(t, false, hasFull)
|
||||
|
||||
// Parent should be the genesis full node.
|
||||
genesisRoot := params.BeaconConfig().ZeroHash
|
||||
genesisFull := f.store.fullNodeByRoot[genesisRoot]
|
||||
require.NotNil(t, genesisFull)
|
||||
assert.Equal(t, genesisFull, en.node.parent)
|
||||
}
|
||||
|
||||
func TestInsertPayload_CreatesFullNode(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(1)
|
||||
blockHash := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
require.Equal(t, 2, len(f.store.emptyNodeByRoot))
|
||||
require.Equal(t, 1, len(f.store.fullNodeByRoot))
|
||||
|
||||
pe, err := prepareGloasForkchoicePayload(root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot))
|
||||
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
require.NotNil(t, fn)
|
||||
|
||||
en := f.store.emptyNodeByRoot[root]
|
||||
require.NotNil(t, en)
|
||||
|
||||
// Empty and full share the same *Node.
|
||||
assert.Equal(t, en.node, fn.node)
|
||||
assert.Equal(t, true, fn.optimistic)
|
||||
assert.Equal(t, true, fn.full)
|
||||
}
|
||||
|
||||
func TestInsertPayload_DuplicateIsNoop(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(1)
|
||||
blockHash := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
pe, err := prepareGloasForkchoicePayload(root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot))
|
||||
|
||||
fn := f.store.fullNodeByRoot[root]
|
||||
require.NotNil(t, fn)
|
||||
|
||||
// Insert again — should be a no-op.
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
assert.Equal(t, fn, f.store.fullNodeByRoot[root])
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot))
|
||||
}
|
||||
|
||||
func TestInsertPayload_WithoutEmptyNode_Errors(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
root := indexToHash(99)
|
||||
pe, err := prepareGloasForkchoicePayload(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = f.InsertPayload(ctx, pe)
|
||||
require.ErrorContains(t, ErrNilNode.Error(), err)
|
||||
}
|
||||
|
||||
func TestGloasBlock_ChildBuildsOnEmpty(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
// Insert Gloas block A (empty only).
|
||||
rootA := indexToHash(1)
|
||||
blockHashA := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Insert Gloas block B as child of (A, empty)
|
||||
rootB := indexToHash(2)
|
||||
blockHashB := indexToHash(200)
|
||||
nonMatchingParentHash := indexToHash(999)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, nonMatchingParentHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
emptyA := f.store.emptyNodeByRoot[rootA]
|
||||
require.NotNil(t, emptyA)
|
||||
nodeB := f.store.emptyNodeByRoot[rootB]
|
||||
require.NotNil(t, nodeB)
|
||||
require.Equal(t, emptyA, nodeB.node.parent)
|
||||
}
|
||||
|
||||
func TestGloasBlock_ChildrenOfEmptyAndFull(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
// Insert Gloas block A (empty only).
|
||||
rootA := indexToHash(1)
|
||||
blockHashA := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
// Insert payload for A
|
||||
pe, err := prepareGloasForkchoicePayload(rootA)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
|
||||
// Insert Gloas block B as child of (A, empty)
|
||||
rootB := indexToHash(2)
|
||||
blockHashB := indexToHash(200)
|
||||
nonMatchingParentHash := indexToHash(999)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, nonMatchingParentHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Insert Gloas block C as child of (A, full)
|
||||
rootC := indexToHash(3)
|
||||
blockHashC := indexToHash(201)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 3, rootC, rootA, blockHashC, blockHashA, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
emptyA := f.store.emptyNodeByRoot[rootA]
|
||||
require.NotNil(t, emptyA)
|
||||
nodeB := f.store.emptyNodeByRoot[rootB]
|
||||
require.NotNil(t, nodeB)
|
||||
require.Equal(t, emptyA, nodeB.node.parent)
|
||||
nodeC := f.store.emptyNodeByRoot[rootC]
|
||||
require.NotNil(t, nodeC)
|
||||
fullA := f.store.fullNodeByRoot[rootA]
|
||||
require.NotNil(t, fullA)
|
||||
require.Equal(t, fullA, nodeC.node.parent)
|
||||
}
|
||||
|
||||
func TestGloasBlock_ChildBuildsOnFull(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
// Insert Gloas block A (empty only).
|
||||
rootA := indexToHash(1)
|
||||
blockHashA := indexToHash(100)
|
||||
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
// Insert payload for A → creates the full node.
|
||||
pe, err := prepareGloasForkchoicePayload(rootA)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertPayload(ctx, pe))
|
||||
|
||||
fullA := f.store.fullNodeByRoot[rootA]
|
||||
require.NotNil(t, fullA)
|
||||
|
||||
// Child for (A, full)
|
||||
rootB := indexToHash(2)
|
||||
blockHashB := indexToHash(200)
|
||||
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, blockHashA, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
nodeB := f.store.emptyNodeByRoot[rootB]
|
||||
require.NotNil(t, nodeB)
|
||||
assert.Equal(t, fullA, nodeB.node.parent)
|
||||
}
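
The three Gloas child-insertion tests above all exercise a single rule: a child block attaches to its parent's full node when the payload parent hash it declares equals the parent's execution block hash, and to the parent's empty node otherwise. A hedged sketch of that decision, with invented names rather than the real store layout:

package sketch

type gloasNode struct{}

type gloasStore struct {
    emptyNodeByRoot map[[32]byte]*gloasNode
    fullNodeByRoot  map[[32]byte]*gloasNode
}

// parentFor picks which version of the parent a new Gloas block hangs off:
// the full node when the child's declared payload parent hash equals the
// parent's execution block hash, the empty node otherwise.
func (s *gloasStore) parentFor(parentRoot, parentBlockHash, childParentHash [32]byte) *gloasNode {
    if childParentHash == parentBlockHash {
        return s.fullNodeByRoot[parentRoot]
    }
    return s.emptyNodeByRoot[parentRoot]
}

In TestGloasBlock_ChildBuildsOnEmpty the non-matching parent hash routes B under (A, empty); in the other two tests a hash equal to blockHashA routes the child under (A, full).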
|
||||
@@ -1,17 +1,95 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessAttestationsThreshold is the amount of time after which we
|
||||
// process attestations for the current slot
|
||||
const ProcessAttestationsThreshold = 10 * time.Second
|
||||
|
||||
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
|
||||
// using the current balance stored in each node.
|
||||
func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
// Recursively calling the children to sum their weights.
|
||||
childrenWeight := uint64(0)
|
||||
for _, child := range n.children {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if err := child.applyWeightChanges(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
childrenWeight += child.weight
|
||||
}
|
||||
if n.root == params.BeaconConfig().ZeroHash {
|
||||
return nil
|
||||
}
|
||||
n.weight = n.balance + childrenWeight
|
||||
return nil
|
||||
}
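
The recursion above is a plain bottom-up sum: each node's weight becomes its own balance plus the weights of its children. For a linear chain of three nodes each holding a balance of 100, the leaf ends at 100, its parent at 200 and the top of the chain at 300, which is exactly what the weight tests later in this diff assert. A self-contained sketch with an invented node type:

package sketch

type weightNode struct {
    balance  uint64
    weight   uint64
    children []*weightNode
}

// applyWeights recomputes weight = balance + sum(children weights), bottom up.
func applyWeights(n *weightNode) uint64 {
    var childSum uint64
    for _, c := range n.children {
        childSum += applyWeights(c)
    }
    n.weight = n.balance + childSum
    return n.weight
}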
|
||||
|
||||
// updateBestDescendant updates the best descendant of this node and its
|
||||
// children.
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.bestDescendant = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var bestChild *Node
|
||||
bestWeight := uint64(0)
|
||||
hasViableDescendant := false
|
||||
for _, child := range n.children {
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && !hasViableDescendant {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
bestWeight = child.weight
|
||||
bestChild = child
|
||||
hasViableDescendant = true
|
||||
} else if childLeadsToViableHead {
|
||||
// If both are viable, compare their weights.
|
||||
if child.weight == bestWeight {
|
||||
// Tie-breaker of equal weights by root.
|
||||
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
|
||||
bestChild = child
|
||||
}
|
||||
} else if child.weight > bestWeight {
|
||||
bestChild = child
|
||||
bestWeight = child.weight
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasViableDescendant {
|
||||
if bestChild.bestDescendant == nil {
|
||||
n.bestDescendant = bestChild
|
||||
} else {
|
||||
n.bestDescendant = bestChild.bestDescendant
|
||||
}
|
||||
} else {
|
||||
n.bestDescendant = nil
|
||||
}
|
||||
return nil
|
||||
}
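
The selection loop above boils down to: among children that lead to a viable head, take the heaviest, and break exact weight ties in favor of the lexicographically larger root. A small standalone sketch of just that comparison (types invented for illustration):

package sketch

import "bytes"

type candidate struct {
    root   [32]byte
    weight uint64
    viable bool
}

// bestChildIndex returns the index of the winning child, or -1 if no child
// leads to a viable head.
func bestChildIndex(children []candidate) int {
    best := -1
    for i, c := range children {
        if !c.viable {
            continue
        }
        switch {
        case best == -1:
            best = i
        case c.weight > children[best].weight:
            best = i
        case c.weight == children[best].weight && bytes.Compare(c.root[:], children[best].root[:]) > 0:
            best = i // tie-break by larger root, as in the loop above
        }
    }
    return best
}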
|
||||
|
||||
// viableForHead returns true if the node is viable to head.
|
||||
// Any node with different finalized or justified epoch than
|
||||
// the ones in fork choice store should not be viable to head.
|
||||
@@ -32,13 +110,30 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
|
||||
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
|
||||
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if !n.optimistic {
|
||||
return nil
|
||||
}
|
||||
n.optimistic = false
|
||||
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
return n.parent.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
// arrivedEarly returns whether this node was inserted before the first
|
||||
// threshold to orphan a block.
|
||||
// Note that genesisTime has seconds granularity, therefore we use a strict
|
||||
// inequality < here. For example a block that arrives 3.9999 seconds into the
|
||||
// slot will have secs = 3 below.
|
||||
func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
|
||||
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
|
||||
return sss < votingWindow, err
|
||||
}
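
The strict inequality only works because the timestamp is truncated to whole seconds before being compared against the attestation deadline: a block received 3.9999 seconds into the slot counts as 3 seconds and is therefore still early against a 4 second cutoff. A hedged sketch of the same check, assuming the slot start falls on a whole second (as it does when genesis time has second granularity) and with the deadline passed in explicitly:

package sketch

import "time"

// arrivedEarlySketch truncates the receive time to whole seconds and compares
// it against the attestation deadline; 3.9999s truncates to 3s and passes a
// 4s cutoff, while 4.0001s truncates to 4s and fails it.
func arrivedEarlySketch(slotStart, received time.Time, deadline time.Duration) bool {
    sinceStart := received.Truncate(time.Second).Sub(slotStart)
    return sinceStart < deadline
}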
|
||||
@@ -48,7 +143,52 @@ func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
|
||||
// Note that genesisTime has seconds granularity, therefore we use an
|
||||
// inequality >= here. For example a block that arrives 10.00001 seconds into the
|
||||
// slot will have secs = 10 below.
|
||||
func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
|
||||
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
|
||||
return secs >= ProcessAttestationsThreshold, err
|
||||
}
|
||||
|
||||
// nodeTreeDump appends to the given list all the nodes descending from this one
|
||||
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
var parentRoot [32]byte
|
||||
if n.parent != nil {
|
||||
parentRoot = n.parent.root
|
||||
}
|
||||
target := [32]byte{}
|
||||
if n.target != nil {
|
||||
target = n.target.root
|
||||
}
|
||||
thisNode := &forkchoice2.Node{
|
||||
Slot: n.slot,
|
||||
BlockRoot: n.root[:],
|
||||
ParentRoot: parentRoot[:],
|
||||
JustifiedEpoch: n.justifiedEpoch,
|
||||
FinalizedEpoch: n.finalizedEpoch,
|
||||
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
|
||||
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
|
||||
Balance: n.balance,
|
||||
Weight: n.weight,
|
||||
ExecutionOptimistic: n.optimistic,
|
||||
ExecutionBlockHash: n.payloadHash[:],
|
||||
Timestamp: n.timestamp,
|
||||
Target: target[:],
|
||||
}
|
||||
if n.optimistic {
|
||||
thisNode.Validity = forkchoice2.Optimistic
|
||||
} else {
|
||||
thisNode.Validity = forkchoice2.Valid
|
||||
}
|
||||
|
||||
nodes = append(nodes, thisNode)
|
||||
var err error
|
||||
for _, child := range n.children {
|
||||
nodes, err = child.nodeTreeDump(ctx, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
// The updated balance of each node is 100
|
||||
s := f.store
|
||||
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
|
||||
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
|
||||
|
||||
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
|
||||
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
|
||||
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
|
||||
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
|
||||
}
|
||||
|
||||
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
|
||||
// The updated balance of each node is 100
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 400
|
||||
s.emptyNodeByRoot[indexToHash(3)].weight = 400
|
||||
s.nodeByRoot[indexToHash(1)].weight = 400
|
||||
s.nodeByRoot[indexToHash(2)].weight = 400
|
||||
s.nodeByRoot[indexToHash(3)].weight = 400
|
||||
|
||||
s.emptyNodeByRoot[indexToHash(1)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].balance = 100
|
||||
s.emptyNodeByRoot[indexToHash(3)].balance = 100
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
|
||||
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
|
||||
|
||||
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
|
||||
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
|
||||
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
|
||||
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
|
||||
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
|
||||
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
@@ -78,7 +78,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
|
||||
// Verify parent's best child and best descendant are `none`.
|
||||
s := f.store
|
||||
assert.Equal(t, 1, len(s.allConsensusChildren(s.treeRootNode)))
|
||||
assert.Equal(t, 1, len(s.treeRootNode.children))
|
||||
nilBestDescendant := s.treeRootNode.bestDescendant == nil
|
||||
assert.Equal(t, true, nilBestDescendant)
|
||||
}
|
||||
@@ -92,9 +92,8 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
children := s.allConsensusChildren(s.treeRootNode)
|
||||
assert.Equal(t, 1, len(children))
|
||||
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
|
||||
assert.Equal(t, 1, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
@@ -109,34 +108,32 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].weight = 100
|
||||
s.emptyNodeByRoot[indexToHash(2)].weight = 200
|
||||
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
|
||||
s.nodeByRoot[indexToHash(1)].weight = 100
|
||||
s.nodeByRoot[indexToHash(2)].weight = 200
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
children := s.allConsensusChildren(s.treeRootNode)
|
||||
assert.Equal(t, 2, len(children))
|
||||
assert.Equal(t, children[1], s.treeRootNode.bestDescendant)
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := t.Context()
|
||||
// Input child is the best descendant
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, indexToHash(102), 1, 1)
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
s := f.store
|
||||
s.emptyNodeByRoot[indexToHash(1)].node.weight = 200
|
||||
s.emptyNodeByRoot[indexToHash(2)].node.weight = 100
|
||||
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
|
||||
s.nodeByRoot[indexToHash(1)].weight = 200
|
||||
s.nodeByRoot[indexToHash(2)].weight = 100
|
||||
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
|
||||
|
||||
children := s.allConsensusChildren(s.treeRootNode)
|
||||
assert.Equal(t, 2, len(children))
|
||||
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
|
||||
assert.Equal(t, 2, len(s.treeRootNode.children))
|
||||
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
|
||||
}
|
||||
|
||||
func TestNode_ViableForHead(t *testing.T) {
|
||||
@@ -179,44 +176,44 @@ func TestNode_LeadsToViableHead(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
|
||||
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].node.leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].node.leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].node.leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
|
||||
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
|
||||
}
|
||||
|
||||
func TestNode_SetFullyValidated(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := t.Context()
|
||||
storeNodes := make([]*PayloadNode, 6)
|
||||
storeNodes[0] = f.store.fullNodeByRoot[params.BeaconConfig().ZeroHash]
|
||||
storeNodes := make([]*Node, 6)
|
||||
storeNodes[0] = f.store.treeRootNode
|
||||
// insert blocks in the fork pattern (optimistic status in parentheses)
|
||||
//
|
||||
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
|
||||
// \
|
||||
// -- 5 (true)
|
||||
//
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[1] = f.store.fullNodeByRoot[blk.Root()]
|
||||
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), indexToHash(102), 1, 1)
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[2] = f.store.fullNodeByRoot[blk.Root()]
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(2)))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), indexToHash(103), 1, 1)
|
||||
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[3] = f.store.fullNodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), indexToHash(104), 1, 1)
|
||||
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[4] = f.store.fullNodeByRoot[blk.Root()]
|
||||
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), indexToHash(105), 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
storeNodes[5] = f.store.fullNodeByRoot[blk.Root()]
|
||||
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
|
||||
|
||||
opt, err := f.IsOptimistic(indexToHash(5))
|
||||
require.NoError(t, err)
|
||||
@@ -226,7 +223,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
require.NoError(t, f.store.setNodeAndParentValidated(ctx, f.store.fullNodeByRoot[indexToHash(4)]))
|
||||
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
|
||||
|
||||
// block 5 should still be optimistic
|
||||
opt, err = f.IsOptimistic(indexToHash(5))
|
||||
@@ -243,20 +240,20 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
require.Equal(t, false, opt)
|
||||
|
||||
respNodes := make([]*forkchoice.Node, 0)
|
||||
respNodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, respNodes)
|
||||
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(respNodes), f.NodeCount())
|
||||
|
||||
for i, respNode := range respNodes {
|
||||
require.Equal(t, storeNodes[i].node.slot, respNode.Slot)
|
||||
require.DeepEqual(t, storeNodes[i].node.root[:], respNode.BlockRoot)
|
||||
require.Equal(t, storeNodes[i].node.balance, respNode.Balance)
|
||||
require.Equal(t, storeNodes[i].node.weight, respNode.Weight)
|
||||
require.Equal(t, storeNodes[i].slot, respNode.Slot)
|
||||
require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
|
||||
require.Equal(t, storeNodes[i].balance, respNode.Balance)
|
||||
require.Equal(t, storeNodes[i].weight, respNode.Weight)
|
||||
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
|
||||
require.Equal(t, storeNodes[i].node.justifiedEpoch, respNode.JustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].node.unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].node.finalizedEpoch, respNode.FinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].node.unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
|
||||
require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
|
||||
require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
|
||||
}
|
||||
}
|
||||
@@ -275,10 +272,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err := f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err := f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, early)
|
||||
late, err := f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, late)
|
||||
|
||||
@@ -292,10 +289,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, early)
|
||||
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, late)
|
||||
|
||||
@@ -308,10 +305,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, early)
|
||||
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, late)
|
||||
|
||||
@@ -323,10 +320,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, headRoot)
|
||||
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
|
||||
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
|
||||
require.ErrorContains(t, "invalid timestamp", err)
|
||||
require.Equal(t, true, early)
|
||||
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
|
||||
require.ErrorContains(t, "invalid timestamp", err)
|
||||
require.Equal(t, false, late)
|
||||
}
|
||||
|
||||
@@ -7,141 +7,92 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
n := s.fullNodeByRoot[root]
|
||||
if n == nil {
|
||||
// The offending node with its payload is not in forkchoice. Try with the parent
|
||||
n = s.emptyNodeByRoot[parentRoot]
|
||||
if n == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok {
|
||||
node, ok = s.nodeByRoot[parentRoot]
|
||||
if !ok || node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
|
||||
}
|
||||
if n.node.blockHash == lastValidHash {
|
||||
// The parent node must have been full and with a valid payload
|
||||
// return early if the parent is LVH
|
||||
if node.payloadHash == lastValidHash {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
if n.node.blockHash == parentHash {
|
||||
// The parent was full and invalid
|
||||
n = s.fullNodeByRoot[parentRoot]
|
||||
if n == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
|
||||
}
|
||||
} else {
|
||||
// The parent is empty and we don't yet know if it's valid or not
|
||||
for n = n.node.parent; n != nil; n = n.node.parent {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
if n.node.blockHash == lastValidHash {
|
||||
// The node built on empty and the whole chain was valid
|
||||
return invalidRoots, nil
|
||||
}
|
||||
if n.node.blockHash == parentHash {
|
||||
// The parent was full and invalid
|
||||
break
|
||||
}
|
||||
}
|
||||
if n == nil {
|
||||
return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// check consistency with the parent information
|
||||
if n.node.parent == nil {
|
||||
return nil, ErrNilNode
|
||||
if node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
|
||||
}
|
||||
if n.node.parent.node.root != parentRoot {
|
||||
return nil, errInvalidParentRoot
|
||||
if node.parent.root != parentRoot {
|
||||
return invalidRoots, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
// n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
|
||||
startNode := n
|
||||
fp := s.fullParent(n)
|
||||
for ; fp != nil && fp.node.blockHash != lastValidHash; fp = s.fullParent(fp) {
|
||||
firstInvalid := node
|
||||
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
n = fp
|
||||
}
|
||||
// Deal with the case that the last valid payload is in a different fork
|
||||
// This means we are dealing with an EE that does not follow the spec
|
||||
if fp == nil {
|
||||
if firstInvalid.parent == nil {
|
||||
// return early if the invalid node was not imported
|
||||
if startNode.node.root != root {
|
||||
if node.root == parentRoot {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
// Remove just the imported invalid root
|
||||
n = startNode
|
||||
firstInvalid = node
|
||||
}
|
||||
return s.removeNode(ctx, n)
|
||||
return s.removeNode(ctx, firstInvalid)
|
||||
}
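
The heart of the rewritten routine is the backwards walk: starting from the offending node (or from its parent, when the offending block never made it into forkchoice), climb the ancestry until the node whose parent carries the engine-reported last valid hash; that node and everything under it is invalid. A simplified, hypothetical sketch of the walk, ignoring the context checks and the parent-root consistency validation:

package sketch

type fcNode struct {
    payloadHash [32]byte
    parent      *fcNode
}

// firstInvalid returns the highest ancestor of start that is still invalid,
// i.e. the first node whose parent carries lastValidHash. It returns nil when
// the last valid hash is nowhere in the ancestry, the case the code above
// treats as a non-compliant execution engine.
func firstInvalid(start *fcNode, lastValidHash [32]byte) *fcNode {
    n := start
    for n.parent != nil && n.parent.payloadHash != lastValidHash {
        n = n.parent
    }
    if n.parent == nil {
        return nil
    }
    return n
}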
|
||||
|
||||
// removeNode removes the node with the given root and all of its children
|
||||
// from the Fork Choice Store.
|
||||
func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
|
||||
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
|
||||
if pn == nil {
|
||||
if node == nil {
|
||||
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
|
||||
}
|
||||
if !pn.optimistic || pn.node.parent == nil {
|
||||
if !node.optimistic || node.parent == nil {
|
||||
return invalidRoots, errInvalidOptimisticStatus
|
||||
}
|
||||
children := pn.node.parent.children
|
||||
|
||||
children := node.parent.children
|
||||
if len(children) == 1 {
|
||||
pn.node.parent.children = []*Node{}
|
||||
node.parent.children = []*Node{}
|
||||
} else {
|
||||
for i, n := range children {
|
||||
if n == pn.node {
|
||||
if n == node {
|
||||
if i != len(children)-1 {
|
||||
children[i] = children[len(children)-1]
|
||||
}
|
||||
pn.node.parent.children = children[:len(children)-1]
|
||||
node.parent.children = children[:len(children)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return s.removeNodeAndChildren(ctx, pn, invalidRoots)
|
||||
return s.removeNodeAndChildren(ctx, node, invalidRoots)
|
||||
}
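
Detaching the invalid node from its parent's children uses the standard swap-with-last slice idiom, so removal is O(1) at the cost of sibling order, which forkchoice does not rely on. As a standalone illustration of that idiom:

package sketch

// removeAt drops the element at index i by swapping it with the last element
// and shrinking the slice; the order of the remaining elements is not kept.
func removeAt[T any](s []T, i int) []T {
    last := len(s) - 1
    if i != last {
        s[i] = s[last]
    }
    return s[:last]
}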
|
||||
|
||||
// removeNodeAndChildren removes `node` and all of its descendant from the Store
|
||||
func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
|
||||
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
|
||||
var err error
|
||||
// If we are removing an empty node, then remove the full node as well if it exists.
|
||||
if !pn.full {
|
||||
fn, ok := s.fullNodeByRoot[pn.node.root]
|
||||
if ok {
|
||||
invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
|
||||
if err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
}
|
||||
}
|
||||
// Now we remove the full node's children.
|
||||
for _, child := range pn.children {
|
||||
for _, child := range node.children {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
// We need to remove only the empty node here since the recursion will take care of the full one.
|
||||
en := s.emptyNodeByRoot[child.root]
|
||||
if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
|
||||
if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
}
|
||||
// Only append the root for the empty nodes.
|
||||
if pn.full {
|
||||
delete(s.fullNodeByRoot, pn.node.root)
|
||||
} else {
|
||||
invalidRoots = append(invalidRoots, pn.node.root)
|
||||
if pn.node.root == s.proposerBoostRoot {
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
}
|
||||
if pn.node.root == s.previousProposerBoostRoot {
|
||||
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
delete(s.emptyNodeByRoot, pn.node.root)
|
||||
invalidRoots = append(invalidRoots, node.root)
|
||||
if node.root == s.proposerBoostRoot {
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
}
|
||||
if node.root == s.previousProposerBoostRoot {
|
||||
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
delete(s.nodeByRoot, node.root)
|
||||
return invalidRoots, nil
|
||||
}
|
||||
|
||||
@@ -23,35 +23,93 @@ import (
|
||||
// And every block in the Fork choice is optimistic.
|
||||
func TestPruneInvalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
root [32]byte // the root of the new INVALID block
|
||||
parentRoot [32]byte // the root of the parent block
|
||||
parentHash [32]byte // the execution hash of the parent block
|
||||
lastValidHash [32]byte // the last valid execution hash
|
||||
payload [32]byte // the last valid hash
|
||||
wantedNodeNumber int
|
||||
wantedRoots [][32]byte
|
||||
wantedErr error
|
||||
}{
|
||||
{ // Bogus LVH, root not in forkchoice
|
||||
name: "bogus LVH not in forkchoice",
|
||||
root: [32]byte{'x'}, parentRoot: [32]byte{'i'}, parentHash: [32]byte{'I'}, lastValidHash: [32]byte{'R'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
},
|
||||
{ // Bogus LVH
|
||||
name: "bogus LVH",
|
||||
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'R'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
[32]byte{'x'},
|
||||
[32]byte{'i'},
|
||||
[32]byte{'R'},
|
||||
13,
|
||||
[][32]byte{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted j",
|
||||
root: [32]byte{'j'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
// Bogus LVH
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'R'},
|
||||
12,
|
||||
[][32]byte{{'i'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted 5",
|
||||
root: [32]byte{'c'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
|
||||
wantedNodeNumber: 5,
|
||||
wantedRoots: [][32]byte{
|
||||
[32]byte{'j'},
|
||||
[32]byte{'b'},
|
||||
[32]byte{'B'},
|
||||
12,
|
||||
[][32]byte{{'j'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'c'},
|
||||
[32]byte{'b'},
|
||||
[32]byte{'B'},
|
||||
4,
|
||||
[][32]byte{{'f'}, {'e'}, {'i'}, {'h'}, {'l'},
|
||||
{'k'}, {'g'}, {'d'}, {'c'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'H'},
|
||||
12,
|
||||
[][32]byte{{'i'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'h'},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'G'},
|
||||
11,
|
||||
[][32]byte{{'i'}, {'h'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
[32]byte{'d'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'f'},
|
||||
[32]byte{'e'},
|
||||
[32]byte{'D'},
|
||||
11,
|
||||
[][32]byte{{'f'}, {'e'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'h'},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'C'},
|
||||
5,
|
||||
[][32]byte{
|
||||
{'f'},
|
||||
{'e'},
|
||||
{'i'},
|
||||
@@ -61,118 +119,106 @@ func TestPruneInvalid(t *testing.T) {
|
||||
{'g'},
|
||||
{'d'},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted i",
|
||||
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'H'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'d'},
|
||||
[32]byte{'E'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted i and h",
|
||||
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'G'},
|
||||
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'i'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'j'},
|
||||
[32]byte{'B'},
|
||||
12,
|
||||
[][32]byte{{'j'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted i--g",
|
||||
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'j'},
|
||||
[32]byte{'J'},
|
||||
13,
|
||||
[][32]byte{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted 9",
|
||||
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
[32]byte{'j'},
|
||||
[32]byte{'a'},
|
||||
[32]byte{'B'},
|
||||
0,
|
||||
[][32]byte{},
|
||||
errInvalidParentRoot,
|
||||
},
|
||||
{
|
||||
name: "wanted f and e",
|
||||
root: [32]byte{'f'}, parentRoot: [32]byte{'e'}, parentHash: [32]byte{'E'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'f'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
name: "wanted 6",
|
||||
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'C'},
|
||||
wantedNodeNumber: 6,
|
||||
wantedRoots: [][32]byte{
|
||||
{'f'}, {'e'}, {'i'}, {'h'}, {'l'}, {'k'}, {'g'},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "wanted 9 again",
|
||||
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'E'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
},
|
||||
{
|
||||
name: "wanted 13",
|
||||
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'B'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
},
|
||||
{
|
||||
name: "wanted empty",
|
||||
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'J'},
|
||||
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
|
||||
},
|
||||
{
|
||||
name: "errInvalidParentRoot",
|
||||
root: [32]byte{'j'}, parentRoot: [32]byte{'a'}, parentHash: [32]byte{'A'}, lastValidHash: [32]byte{'B'},
|
||||
wantedErr: errInvalidParentRoot,
|
||||
},
|
||||
{
|
||||
name: "root z",
|
||||
root: [32]byte{'z'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
|
||||
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
|
||||
[32]byte{'z'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{}))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
|
||||
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.parentHash, tc.lastValidHash)
|
||||
if tc.wantedErr == nil {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(tc.wantedRoots), len(roots))
|
||||
require.DeepEqual(t, tc.wantedRoots, roots)
|
||||
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
|
||||
} else {
|
||||
require.ErrorIs(t, tc.wantedErr, err)
|
||||
}
|
||||
})
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.payload)
|
||||
if tc.wantedErr == nil {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tc.wantedRoots, roots)
|
||||
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
|
||||
} else {
|
||||
require.ErrorIs(t, tc.wantedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -194,40 +240,11 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
|
||||
f.store.previousProposerBoostScore = 10
|
||||
f.store.previousProposerBoostRoot = [32]byte{'b'}
|
||||
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'B'}, [32]byte{'A'})
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
// proposer boost is still applied to c
|
||||
require.Equal(t, uint64(10), f.store.previousProposerBoostScore)
|
||||
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.Equal(t, [32]byte{'b'}, f.store.previousProposerBoostRoot)
|
||||
}
|
||||
|
||||
func TestSetOptimisticToInvalid_ProposerBoost_Older(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
f := setup(1, 1)
|
||||
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
f.store.proposerBoostRoot = [32]byte{'d'}
|
||||
f.store.previousProposerBoostScore = 10
|
||||
f.store.previousProposerBoostRoot = [32]byte{'c'}
|
||||
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'C'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
// proposer boost is still applied to c
|
||||
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
|
||||
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.Equal(t, [32]byte{}, f.store.previousProposerBoostRoot)
|
||||
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
|
||||
}
|
||||
|
||||
// This is a regression test (10565)
|
||||
@@ -255,9 +272,10 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{'A'})
|
||||
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(f.store.fullNodeByRoot[[32]byte{'a'}].children))
|
||||
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
|
||||
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
@@ -304,13 +322,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(roots))
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
@@ -357,13 +375,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(roots))
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
func TestSetOptimisticToValid(t *testing.T) {
|
||||
|
||||
@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
s := f.store
|
||||
proposerScore := uint64(0)
|
||||
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
|
||||
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
|
||||
} else {
|
||||
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
}
|
||||
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
|
||||
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
|
||||
} else {
|
||||
|
||||
@@ -166,14 +166,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// (1: 48) -> (2: 38) -> (3: 10)
|
||||
// \--------------->(4: 18)
|
||||
//
|
||||
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
|
||||
require.Equal(t, node1.node.weight, uint64(48))
|
||||
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
|
||||
require.Equal(t, node2.node.weight, uint64(38))
|
||||
node3 := f.store.emptyNodeByRoot[indexToHash(3)]
|
||||
require.Equal(t, node3.node.weight, uint64(10))
|
||||
node4 := f.store.emptyNodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node4.node.weight, uint64(18))
|
||||
node1 := f.store.nodeByRoot[indexToHash(1)]
|
||||
require.Equal(t, node1.weight, uint64(48))
|
||||
node2 := f.store.nodeByRoot[indexToHash(2)]
|
||||
require.Equal(t, node2.weight, uint64(38))
|
||||
node3 := f.store.nodeByRoot[indexToHash(3)]
|
||||
require.Equal(t, node3.weight, uint64(10))
|
||||
node4 := f.store.nodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node4.weight, uint64(18))
|
||||
|
||||
// Regression: process attestations for C, check that it
|
||||
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
|
||||
|
||||
@@ -34,23 +34,22 @@ const orphanLateBlockProposingEarly = 2
|
||||
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
override = false
|
||||
|
||||
// We only need to override FCU if our current consensusHead is from the current
|
||||
// We only need to override FCU if our current head is from the current
|
||||
// slot. This differs from the spec implementation in that we assume
|
||||
// that we will call this function in the previous slot to proposing.
|
||||
consensusHead := f.store.headNode
|
||||
if consensusHead == nil {
|
||||
head := f.store.headNode
|
||||
if head == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
if head.slot != slots.CurrentSlot(f.store.genesisTime) {
|
||||
return
|
||||
}
|
||||
|
||||
// Do not reorg on epoch boundaries
|
||||
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
return
|
||||
}
|
||||
head := f.store.choosePayloadContent(consensusHead)
|
||||
// Only reorg blocks that arrive late
|
||||
early, err := head.arrivedEarly(f.store.genesisTime)
|
||||
if err != nil {
|
||||
@@ -62,15 +61,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
}
|
||||
// Only reorg if we have been finalizing
|
||||
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
|
||||
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
|
||||
return
|
||||
}
|
||||
// Only orphan a single block
|
||||
parent := consensusHead.parent
|
||||
parent := head.parent
|
||||
if parent == nil {
|
||||
return
|
||||
}
|
||||
if consensusHead.slot > parent.node.slot+1 {
|
||||
if head.slot > parent.slot+1 {
|
||||
return
|
||||
}
|
||||
// Do not orphan a block that has higher justification than the parent
|
||||
@@ -79,12 +78,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
// }
|
||||
|
||||
// Only orphan a block if the head LMD vote is weak
|
||||
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
|
||||
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
		return
	}

	// Return early if we are checking before 10 seconds into the slot
	sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
	sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
	if err != nil {
		log.WithError(err).Error("could not check current slot")
		return true
@@ -93,7 +92,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
		return true
	}
	// Only orphan a block if the parent LMD vote is strong
	if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
	if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
		return
	}
	return true
@@ -107,61 +106,60 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
	consensusHead := f.store.headNode
	if consensusHead == nil {
	head := f.store.headNode
	if head == nil {
		return [32]byte{}
	}
	// Only reorg blocks from the previous slot.
	currentSlot := slots.CurrentSlot(f.store.genesisTime)
	if consensusHead.slot+1 != currentSlot {
		return consensusHead.root
	if head.slot+1 != currentSlot {
		return head.root
	}
	// Do not reorg on epoch boundaries
	if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
		return consensusHead.root
	if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
		return head.root
	}
	// Only reorg blocks that arrive late
	head := f.store.choosePayloadContent(consensusHead)
	early, err := head.arrivedEarly(f.store.genesisTime)
	if err != nil {
		log.WithError(err).Error("could not check if block arrived early")
		return consensusHead.root
		return head.root
	}
	if early {
		return consensusHead.root
		return head.root
	}
	// Only reorg if we have been finalizing
	finalizedEpoch := f.store.finalizedCheckpoint.Epoch
	if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
		return consensusHead.root
	if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
		return head.root
	}
	// Only orphan a single block
	parent := consensusHead.parent
	parent := head.parent
	if parent == nil {
		return consensusHead.root
		return head.root
	}
	if consensusHead.slot > parent.node.slot+1 {
		return consensusHead.root
	if head.slot > parent.slot+1 {
		return head.root
	}

	// Only orphan a block if the head LMD vote is weak
	if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
		return consensusHead.root
	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
		return head.root
	}

	// Only orphan a block if the parent LMD vote is strong
	if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
		return consensusHead.root
	if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
		return head.root
	}

	// Only reorg if we are proposing early
	sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
	if err != nil {
		log.WithError(err).Error("could not check if proposing early")
		return consensusHead.root
		return head.root
	}
	if sss >= orphanLateBlockProposingEarly*time.Second {
		return consensusHead.root
		return head.root
	}
	return parent.node.root
	return parent.root
}

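The two reorg gates above both reduce to integer percentage comparisons against the per-slot committee weight. The sketch below is a minimal illustration, not code from this change: the helper names and the 20/160 example thresholds are assumptions; the real checks read the thresholds from params.BeaconConfig() as shown in the diff.

// Illustrative only: the threshold arithmetic used by the reorg checks above,
// pulled out into standalone helpers. Names and example values are assumptions.
package main

import "fmt"

// isHeadWeak mirrors `head.weight*100 > committeeWeight*ReorgHeadWeightThreshold`
// being false: the head is "weak" when its LMD weight is at most the threshold
// percentage of one slot committee's weight.
func isHeadWeak(headWeight, committeeWeight, headThresholdPct uint64) bool {
	return headWeight*100 <= committeeWeight*headThresholdPct
}

// isParentStrong mirrors `parent.weight*100 < committeeWeight*ReorgParentWeightThreshold`
// being false: the parent is "strong" when its weight meets the threshold percentage.
func isParentStrong(parentWeight, committeeWeight, parentThresholdPct uint64) bool {
	return parentWeight*100 >= committeeWeight*parentThresholdPct
}

func main() {
	// Assumed example numbers; thresholds of 20% (head) and 160% (parent).
	committee := uint64(1_000_000)
	fmt.Println(isHeadWeak(150_000, committee, 20))        // true: 15% <= 20%
	fmt.Println(isParentStrong(1_700_000, committee, 160)) // true: 170% >= 160%
}
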
@@ -38,6 +38,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
require.Equal(t, blk.Root(), headRoot)
|
||||
t.Run("head is weak", func(t *testing.T) {
|
||||
require.Equal(t, true, f.ShouldOverrideFCU())
|
||||
|
||||
})
|
||||
t.Run("head is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode
|
||||
@@ -59,11 +60,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
f.store.headNode.slot = saved
|
||||
})
|
||||
t.Run("head is early", func(t *testing.T) {
|
||||
fn := f.store.fullNodeByRoot[f.store.headNode.root]
|
||||
saved := fn.timestamp
|
||||
fn.timestamp = saved.Add(-2 * time.Second)
|
||||
saved := f.store.headNode.timestamp
|
||||
f.store.headNode.timestamp = saved.Add(-2 * time.Second)
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
fn.timestamp = saved
|
||||
f.store.headNode.timestamp = saved
|
||||
})
|
||||
t.Run("chain not finalizing", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
@@ -74,10 +74,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
})
|
||||
t.Run("Not single block reorg", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.slot
|
||||
f.store.headNode.parent.node.slot = 0
|
||||
saved := f.store.headNode.parent.slot
|
||||
f.store.headNode.parent.slot = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.node.slot = saved
|
||||
f.store.headNode.parent.slot = saved
|
||||
})
|
||||
t.Run("parent is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent
|
||||
@@ -86,17 +86,17 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
f.store.headNode.parent = saved
|
||||
})
|
||||
t.Run("parent is weak early call", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.weight
|
||||
f.store.headNode.parent.node.weight = 0
|
||||
saved := f.store.headNode.parent.weight
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, true, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.node.weight = saved
|
||||
f.store.headNode.parent.weight = saved
|
||||
})
|
||||
t.Run("parent is weak late call", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.weight
|
||||
saved := f.store.headNode.parent.weight
|
||||
driftGenesisTime(f, 2, 11*time.Second)
|
||||
f.store.headNode.parent.node.weight = 0
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.node.weight = saved
|
||||
f.store.headNode.parent.weight = saved
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
|
||||
})
|
||||
t.Run("Head is strong", func(t *testing.T) {
|
||||
@@ -135,8 +135,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blk.Root(), headRoot)
|
||||
orphanLateBlockFirstThreshold := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
|
||||
fn := f.store.fullNodeByRoot[f.store.headNode.root]
|
||||
fn.timestamp = fn.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
|
||||
f.store.headNode.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
|
||||
t.Run("head is weak", func(t *testing.T) {
|
||||
require.Equal(t, parentRoot, f.GetProposerHead())
|
||||
})
|
||||
@@ -160,12 +159,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
f.store.headNode.slot = saved
|
||||
})
|
||||
t.Run("head is early", func(t *testing.T) {
|
||||
fn := f.store.fullNodeByRoot[f.store.headNode.root]
|
||||
saved := fn.timestamp
|
||||
saved := f.store.headNode.timestamp
|
||||
headTimeStamp := f.store.genesisTime.Add(time.Duration(uint64(f.store.headNode.slot)*params.BeaconConfig().SecondsPerSlot+1) * time.Second)
|
||||
fn.timestamp = headTimeStamp
|
||||
f.store.headNode.timestamp = headTimeStamp
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
fn.timestamp = saved
|
||||
f.store.headNode.timestamp = saved
|
||||
})
|
||||
t.Run("chain not finalizing", func(t *testing.T) {
|
||||
saved := f.store.headNode.slot
|
||||
@@ -176,10 +174,10 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
driftGenesisTime(f, 3, 1*time.Second)
|
||||
})
|
||||
t.Run("Not single block reorg", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.node.slot
|
||||
f.store.headNode.parent.node.slot = 0
|
||||
saved := f.store.headNode.parent.slot
|
||||
f.store.headNode.parent.slot = 0
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
f.store.headNode.parent.node.slot = saved
|
||||
f.store.headNode.parent.slot = saved
|
||||
})
|
||||
t.Run("parent is nil", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// head starts from justified root and then follows the best descendant links
|
||||
@@ -27,16 +26,13 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// JustifiedRoot has to be known
|
||||
var jn *Node
|
||||
ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if ej != nil {
|
||||
jn = ej.node
|
||||
} else {
|
||||
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if !ok || justifiedNode == nil {
|
||||
// If the justifiedCheckpoint is from genesis, then the root is
|
||||
// zeroHash. In this case it should be the root of forkchoice
|
||||
// tree.
|
||||
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
jn = s.treeRootNode
|
||||
justifiedNode = s.treeRootNode
|
||||
} else {
|
||||
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
|
||||
}
|
||||
@@ -44,9 +40,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
|
||||
// If the justified node doesn't have a best descendant,
|
||||
// the best node is itself.
|
||||
bestDescendant := jn.bestDescendant
|
||||
bestDescendant := justifiedNode.bestDescendant
|
||||
if bestDescendant == nil {
|
||||
bestDescendant = jn
|
||||
bestDescendant = justifiedNode
|
||||
}
|
||||
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
|
||||
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
|
||||
@@ -70,42 +66,29 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
// It then updates the new node's parent with the best child and descendant node.
|
||||
func (s *Store) insert(ctx context.Context,
|
||||
roblock consensus_blocks.ROBlock,
|
||||
justifiedEpoch, finalizedEpoch primitives.Epoch,
|
||||
) (*PayloadNode, error) {
|
||||
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
defer span.End()
|
||||
|
||||
root := roblock.Root()
|
||||
block := roblock.Block()
|
||||
slot := block.Slot()
|
||||
parentRoot := block.ParentRoot()
|
||||
var payloadHash [32]byte
|
||||
if block.Version() >= version.Bellatrix {
|
||||
execution, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(payloadHash[:], execution.BlockHash())
|
||||
}
|
||||
|
||||
// Return if the block has been inserted into Store before.
|
||||
if n, ok := s.emptyNodeByRoot[root]; ok {
|
||||
if n, ok := s.nodeByRoot[root]; ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
block := roblock.Block()
|
||||
slot := block.Slot()
|
||||
var parent *PayloadNode
|
||||
blockHash := &[32]byte{}
|
||||
if block.Version() >= version.Gloas {
|
||||
if err := s.resolveParentPayloadStatus(block, &parent, blockHash); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if block.Version() >= version.Bellatrix {
|
||||
execution, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(blockHash[:], execution.BlockHash())
|
||||
}
|
||||
parentRoot := block.ParentRoot()
|
||||
en := s.emptyNodeByRoot[parentRoot]
|
||||
parent = s.fullNodeByRoot[parentRoot]
|
||||
if parent == nil && en != nil {
|
||||
// pre-Gloas only full parents are allowed.
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
|
||||
parent := s.nodeByRoot[parentRoot]
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
@@ -114,52 +97,30 @@ func (s *Store) insert(ctx context.Context,
|
||||
unrealizedJustifiedEpoch: justifiedEpoch,
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
unrealizedFinalizedEpoch: finalizedEpoch,
|
||||
blockHash: *blockHash,
|
||||
optimistic: true,
|
||||
payloadHash: payloadHash,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Set the node's target checkpoint
|
||||
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
n.target = n
|
||||
} else if parent != nil {
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
|
||||
n.target = parent.node.target
|
||||
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
|
||||
n.target = parent.target
|
||||
} else {
|
||||
n.target = parent.node
|
||||
n.target = parent
|
||||
}
|
||||
}
|
||||
var ret *PayloadNode
|
||||
optimistic := true
|
||||
if parent != nil {
|
||||
optimistic = n.parent.optimistic
|
||||
}
|
||||
// Make the empty node.It's optimistic status equals it's parent's status.
|
||||
pn := &PayloadNode{
|
||||
node: n,
|
||||
optimistic: optimistic,
|
||||
timestamp: time.Now(),
|
||||
children: make([]*Node, 0),
|
||||
}
|
||||
s.emptyNodeByRoot[root] = pn
|
||||
ret = pn
|
||||
if block.Version() < version.Gloas {
|
||||
// Make also the full node, this is optimistic until the engine returns the execution payload validation.
|
||||
fn := &PayloadNode{
|
||||
node: n,
|
||||
optimistic: true,
|
||||
timestamp: time.Now(),
|
||||
full: true,
|
||||
}
|
||||
ret = fn
|
||||
s.fullNodeByRoot[root] = fn
|
||||
}
|
||||
|
||||
s.nodeByRoot[root] = n
|
||||
if parent == nil {
|
||||
if s.treeRootNode == nil {
|
||||
s.treeRootNode = n
|
||||
s.headNode = n
|
||||
s.highestReceivedNode = n
|
||||
} else {
|
||||
delete(s.emptyNodeByRoot, root)
|
||||
delete(s.fullNodeByRoot, root)
|
||||
delete(s.nodeByRoot, root)
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
} else {
|
||||
@@ -167,7 +128,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Apply proposer boost
|
||||
now := time.Now()
|
||||
if now.Before(s.genesisTime) {
|
||||
return ret, nil
|
||||
return n, nil
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
|
||||
@@ -183,16 +144,17 @@ func (s *Store) insert(ctx context.Context,
|
||||
// Update best descendants
|
||||
jEpoch := s.justifiedCheckpoint.Epoch
|
||||
fEpoch := s.finalizedCheckpoint.Epoch
|
||||
if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": root,
|
||||
}).Error("Could not update best descendant")
|
||||
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
|
||||
_, remErr := s.removeNode(ctx, n)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("could not remove node")
|
||||
}
|
||||
return nil, errors.Wrap(err, "could not update best descendants")
|
||||
}
|
||||
}
|
||||
// Update metrics.
|
||||
processedBlockCount.Inc()
|
||||
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
|
||||
nodeCount.Set(float64(len(s.nodeByRoot)))
|
||||
|
||||
// Only update received block slot if it's within epoch from current time.
|
||||
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
|
||||
@@ -203,10 +165,10 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.highestReceivedNode = n
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` maps
|
||||
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
|
||||
// starting from `node` down to the finalized Node or to a leaf of the Fork
|
||||
// choice store.
|
||||
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
|
||||
@@ -219,51 +181,44 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, child := range s.allConsensusChildren(node) {
|
||||
for _, child := range node.children {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
en := s.emptyNodeByRoot[node.root]
|
||||
en.children = nil
|
||||
delete(s.emptyNodeByRoot, node.root)
|
||||
fn := s.fullNodeByRoot[node.root]
|
||||
if fn != nil {
|
||||
fn.children = nil
|
||||
delete(s.fullNodeByRoot, node.root)
|
||||
}
|
||||
|
||||
node.children = nil
|
||||
delete(s.nodeByRoot, node.root)
|
||||
return nil
|
||||
}
|
||||
|
||||
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
|
||||
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
|
||||
// TODO: GLOAS, to ensure that chains up to a full node are found, we may want to consider pruning only up to the latest full block that was finalized
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
defer span.End()
|
||||
|
||||
finalizedRoot := s.finalizedCheckpoint.Root
|
||||
finalizedEpoch := s.finalizedCheckpoint.Epoch
|
||||
fen, ok := s.emptyNodeByRoot[finalizedRoot]
|
||||
if !ok || fen == nil {
|
||||
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
|
||||
if !ok || finalizedNode == nil {
|
||||
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
|
||||
}
|
||||
fn := fen.node
|
||||
// return early if we haven't changed the finalized checkpoint
|
||||
if fn.parent == nil {
|
||||
if finalizedNode.parent == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save the new finalized dependent root because it will be pruned
|
||||
s.finalizedDependentRoot = fn.parent.node.root
|
||||
s.finalizedDependentRoot = finalizedNode.parent.root
|
||||
|
||||
// Prune nodeByRoot starting from root
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fn.parent = nil
|
||||
s.treeRootNode = fn
|
||||
finalizedNode.parent = nil
|
||||
s.treeRootNode = finalizedNode
|
||||
|
||||
prunedCount.Inc()
|
||||
// Prune all children of the finalized checkpoint block that are incompatible with it
|
||||
@@ -271,13 +226,13 @@ func (s *Store) prune(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute epoch start")
|
||||
}
|
||||
if fn.slot == checkpointMaxSlot {
|
||||
if finalizedNode.slot == checkpointMaxSlot {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, child := range fen.children {
|
||||
for _, child := range finalizedNode.children {
|
||||
if child != nil && child.slot <= checkpointMaxSlot {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
|
||||
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
|
||||
return errors.Wrap(err, "could not prune incompatible finalized child")
|
||||
}
|
||||
}
|
||||
@@ -291,10 +246,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
|
||||
var roots [][32]byte
|
||||
var slots []primitives.Slot
|
||||
|
||||
for root, n := range s.emptyNodeByRoot {
|
||||
if len(s.allConsensusChildren(n.node)) == 0 {
|
||||
for root, node := range s.nodeByRoot {
|
||||
if len(node.children) == 0 {
|
||||
roots = append(roots, root)
|
||||
slots = append(slots, n.node.slot)
|
||||
slots = append(slots, node.slot)
|
||||
}
|
||||
}
|
||||
return roots, slots
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -40,18 +41,18 @@ func TestStore_NodeByRoot(t *testing.T) {
|
||||
state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
node0 := f.store.emptyNodeByRoot[params.BeaconConfig().ZeroHash]
|
||||
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
|
||||
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
|
||||
node0 := f.store.treeRootNode
|
||||
node1 := node0.children[0]
|
||||
node2 := node1.children[0]
|
||||
|
||||
expectedRoots := map[[32]byte]*PayloadNode{
|
||||
expectedRoots := map[[32]byte]*Node{
|
||||
params.BeaconConfig().ZeroHash: node0,
|
||||
indexToHash(1): node1,
|
||||
indexToHash(2): node2,
|
||||
}
|
||||
|
||||
require.Equal(t, 3, f.NodeCount())
|
||||
for root, node := range f.store.emptyNodeByRoot {
|
||||
for root, node := range f.store.nodeByRoot {
|
||||
v, ok := expectedRoots[root]
|
||||
require.Equal(t, ok, true)
|
||||
require.Equal(t, v, node)
|
||||
@@ -110,28 +111,37 @@ func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
require.Equal(t, h, indexToHash(4))
|
||||
}
|
||||
|
||||
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
f := setup(0, 0)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
cancel()
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
err = f.InsertNode(ctx, state, blkRoot)
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
}
|
||||
|
||||
func TestStore_Insert(t *testing.T) {
|
||||
// The new node does not have a parent.
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
emptyRootPN := &PayloadNode{node: treeRootNode}
|
||||
fullRootPN := &PayloadNode{node: treeRootNode, full: true, optimistic: true}
|
||||
emptyNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): emptyRootPN}
|
||||
fullNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): fullRootPN}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
s := &Store{emptyNodeByRoot: emptyNodeByRoot, fullNodeByRoot: fullNodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
payloadHash := [32]byte{'a'}
|
||||
ctx := t.Context()
|
||||
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
_, err = s.insert(ctx, blk, 1, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*PayloadNode)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
children := s.allConsensusChildren(treeRootNode)
|
||||
assert.Equal(t, 1, len(children), "Incorrect children number")
|
||||
assert.Equal(t, payloadHash, children[0].blockHash, "Incorrect payload hash")
|
||||
child := children[0]
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
|
||||
assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
|
||||
child := treeRootNode.children[0]
|
||||
assert.Equal(t, primitives.Epoch(1), child.justifiedEpoch, "Incorrect justification")
|
||||
assert.Equal(t, primitives.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
|
||||
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
|
||||
@@ -156,7 +166,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
// Finalized root is at index 99 so everything before 99 should be pruned.
|
||||
s.finalizedCheckpoint.Root = indexToHash(99)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
}
|
||||
|
||||
func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
@@ -178,12 +188,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
// Finalized root is at index 11 so everything before 11 should be pruned.
|
||||
s.finalizedCheckpoint.Root = indexToHash(10)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
|
||||
// One more time.
|
||||
s.finalizedCheckpoint.Root = indexToHash(20)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
|
||||
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
|
||||
}
|
||||
|
||||
func TestStore_Prune_ReturnEarly(t *testing.T) {
|
||||
@@ -226,7 +236,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.emptyNodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
@@ -306,7 +316,7 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
s := f.store
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.emptyNodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
}
|
||||
|
||||
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
|
||||
@@ -21,26 +21,23 @@ type ForkChoice struct {
	balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}

var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)

// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
	justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
	unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
	unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
	prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
	finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
	proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
	previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
	previousProposerBoostScore uint64 // previous proposer boosted root score.
	finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
	committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
	treeRootNode *Node // the root node of the store tree.
	headNode *Node // last head Node
	emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by roots.
	fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // full nodes (the payload was present) indexed by beacon block root.
	slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
	originRoot [fieldparams.RootLength]byte // The genesis block root
	justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
	unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
	unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
	prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
	finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
	proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
	previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
	previousProposerBoostScore uint64 // previous proposer boosted root score.
	finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
	committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
	treeRootNode *Node // the root node of the store tree.
	headNode *Node // last head Node
	nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
	slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
	originRoot [fieldparams.RootLength]byte // The genesis block root
	genesisTime time.Time
	highestReceivedNode *Node // The highest slot node.
	receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
@@ -52,28 +49,19 @@ type Store struct {
type Node struct {
	slot primitives.Slot // slot of the block converted to the node.
	root [fieldparams.RootLength]byte // root of the block converted to the node.
	blockHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
	parent *PayloadNode // parent index of this node.
	payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
	parent *Node // parent index of this node.
	target *Node // target checkpoint for
	bestDescendant *Node // bestDescendant node of this node.
	children []*Node // the list of direct children of this Node
	justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
	unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
	finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
	unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
	balance uint64 // the balance that voted for this node directly
	weight uint64 // weight of this node: the total balance including children
}

// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty of full
type PayloadNode struct {
	optimistic bool // whether the block has been fully validated or not
	full bool // whether this node represents a payload present or not
	weight uint64 // weight of this node: the total balance including children
	balance uint64 // the balance that voted for this node directly
	bestDescendant *Node // bestDescendant node of this payload node.
	node *Node // the consensus part of this full forkchoice node
	timestamp time.Time // The timestamp when the node was inserted.
	children []*Node // the list of direct children of this Node
	bestDescendant *Node // bestDescendant node of this node.
	optimistic bool // whether the block has been fully validated or not
	timestamp time.Time // The timestamp when the node was inserted.
}

// Vote defines an individual validator's vote.

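With the PayloadNode indirection gone, a single Node carries the tree links (parent, children) together with its voted balance and accumulated weight. The toy sketch below illustrates, under assumed names and entirely outside the store's real weight-propagation code, how a node's weight is its own balance plus the weight of its children.

// Illustrative only: a toy model of weight roll-up over a consolidated node tree.
// This is not the store's actual weight-propagation code.
package main

import "fmt"

type toyNode struct {
	root     string
	balance  uint64 // balance voting for this node directly
	weight   uint64 // balance including all descendants
	parent   *toyNode
	children []*toyNode
}

// computeWeight fills in weight bottom-up: own balance plus children's weights.
func computeWeight(n *toyNode) uint64 {
	n.weight = n.balance
	for _, c := range n.children {
		n.weight += computeWeight(c)
	}
	return n.weight
}

func main() {
	root := &toyNode{root: "root"}
	a := &toyNode{root: "a", balance: 32, parent: root}
	b := &toyNode{root: "b", balance: 64, parent: a}
	root.children = []*toyNode{a}
	a.children = []*toyNode{b}

	computeWeight(root)
	fmt.Println(root.weight, a.weight, b.weight) // 96 96 64
}
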
@@ -15,34 +15,33 @@ import (
|
||||
)
|
||||
|
||||
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
|
||||
en, ok := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
|
||||
}
|
||||
if epoch < en.node.unrealizedJustifiedEpoch {
|
||||
if epoch < node.unrealizedJustifiedEpoch {
|
||||
return errInvalidUnrealizedJustifiedEpoch
|
||||
}
|
||||
en.node.unrealizedJustifiedEpoch = epoch
|
||||
node.unrealizedJustifiedEpoch = epoch
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
|
||||
en, ok := s.emptyNodeByRoot[root]
|
||||
if !ok || en == nil {
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
|
||||
}
|
||||
if epoch < en.node.unrealizedFinalizedEpoch {
|
||||
if epoch < node.unrealizedFinalizedEpoch {
|
||||
return errInvalidUnrealizedFinalizedEpoch
|
||||
}
|
||||
en.node.unrealizedFinalizedEpoch = epoch
|
||||
node.unrealizedFinalizedEpoch = epoch
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
|
||||
// epochs stored within nodes. It should be called at the beginning of each epoch.
|
||||
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
|
||||
for _, en := range f.store.emptyNodeByRoot {
|
||||
node := en.node
|
||||
for _, node := range f.store.nodeByRoot {
|
||||
node.justifiedEpoch = node.unrealizedJustifiedEpoch
|
||||
node.finalizedEpoch = node.unrealizedFinalizedEpoch
|
||||
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
|
||||
@@ -63,17 +62,16 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
|
||||
if node.parent == nil { // Nothing to do if the parent is nil.
|
||||
return jc, fc
|
||||
}
|
||||
pn := node.parent.node
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
|
||||
// Exit early if it's justified or too early to be justified.
|
||||
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
|
||||
node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
|
||||
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
|
||||
@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
|
||||
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
|
||||
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
|
||||
|
||||
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
|
||||
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
|
||||
@@ -78,9 +78,9 @@ func TestStore_LongFork(t *testing.T) {
|
||||
// Add an attestation to c, it is head
|
||||
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
|
||||
f.justifiedBalances = []uint64{100}
|
||||
c := f.store.emptyNodeByRoot[[32]byte{'c'}]
|
||||
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.node.slot))
|
||||
driftGenesisTime(f, c.node.slot, 0)
|
||||
c := f.store.nodeByRoot[[32]byte{'c'}]
|
||||
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.slot))
|
||||
driftGenesisTime(f, c.slot, 0)
|
||||
headRoot, err := f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, headRoot)
|
||||
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
|
||||
d := f.store.emptyNodeByRoot[[32]byte{'d'}]
|
||||
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.node.slot))
|
||||
driftGenesisTime(f, d.node.slot, 0)
|
||||
require.Equal(t, true, d.node.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.node.slot)))
|
||||
d := f.store.nodeByRoot[[32]byte{'d'}]
|
||||
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.slot))
|
||||
driftGenesisTime(f, d.slot, 0)
|
||||
require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.slot)))
|
||||
headRoot, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, headRoot)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
}
|
||||
|
||||
// Epoch 1 Epoch 2 Epoch 3
|
||||
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'d'}, headRoot)
|
||||
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
|
||||
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
|
||||
driftGenesisTime(f, 99, 0)
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
|
||||
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'h'}, headRoot)
|
||||
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
|
||||
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
|
||||
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
|
||||
}
|
||||
|
||||
func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 66, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
})
|
||||
|
||||
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
|
||||
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 96, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
|
||||
})
|
||||
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 127, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
t.Run("Block from previous Epoch", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(tt, err)
|
||||
require.NoError(tt, f.InsertNode(ctx, st, root))
|
||||
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
|
||||
driftGenesisTime(f, 96, 0)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
|
||||
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
|
||||
f := setup(1, 1)
|
||||
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
// This test checks that the heuristics in pullTips did not apply and
|
||||
// the test continues to compute a bogus unrealized
|
||||
// justification
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
|
||||
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
	// 9  10
	f.store.finalizedCheckpoint.Root = indexToHash(5)
	require.NoError(t, f.store.prune(t.Context()))
	assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
	assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
	// we pruned artificially the justified root.
	f.store.justifiedCheckpoint.Root = indexToHash(5)

@@ -9,7 +9,6 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

@@ -24,7 +23,6 @@ type ForkChoicer interface {
	Unlock()
	HeadRetriever // to compute head.
	BlockProcessor // to track new block for fork choice.
	PayloadProcessor // to track new payloads for fork choice.
	AttestationProcessor // to track new attestation for fork choice.
	Getter // to retrieve fork choice information.
	Setter // to set fork choice information.
@@ -49,11 +47,6 @@ type BlockProcessor interface {
	InsertChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
}

// PayloadProcessor processes a payload envelope
type PayloadProcessor interface {
	InsertPayload(context.Context, interfaces.ROExecutionPayloadEnvelope) error
}

// AttestationProcessor processes the attestation that's used for accounting fork choice.
type AttestationProcessor interface {
	ProcessAttestation(context.Context, []uint64, [32]byte, primitives.Epoch)
@@ -96,7 +89,7 @@ type FastGetter interface {
// Setter allows to set forkchoice information
type Setter interface {
	SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
	SetOptimisticToInvalid(context.Context, [32]byte, [32]byte, [32]byte, [32]byte) ([][32]byte, error)
	SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
	UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
	UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
	SetGenesisTime(time.Time)

@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
|
||||
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return ðpb.LightClientOptimisticUpdateAltair{} },
|
||||
LightClientFinalityUpdateTopicFormat: func() proto.Message { return ðpb.LightClientFinalityUpdateAltair{} },
|
||||
DataColumnSubnetTopicFormat: func() proto.Message { return ðpb.DataColumnSidecar{} },
|
||||
PayloadAttestationMessageTopicFormat: func() proto.Message { return ðpb.PayloadAttestationMessage{} },
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
@@ -145,7 +144,4 @@ func init() {
|
||||
|
||||
// Specially handle Fulu objects.
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockFulu]()] = BlockSubnetTopicFormat
|
||||
|
||||
// Payload attestation messages.
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()] = PayloadAttestationMessageTopicFormat
|
||||
}
|
||||
|
||||
@@ -46,8 +46,6 @@ const (
|
||||
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
|
||||
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
|
||||
GossipDataColumnSidecarMessage = "data_column_sidecar"
|
||||
// GossipPayloadAttestationMessage is the name for the payload attestation message type.
|
||||
GossipPayloadAttestationMessage = "payload_attestation_message"
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
@@ -77,8 +75,6 @@ const (
|
||||
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
|
||||
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
|
||||
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
|
||||
// PayloadAttestationMessageTopicFormat is the topic format for payload attestation messages.
|
||||
PayloadAttestationMessageTopicFormat = GossipProtocolAndDigest + GossipPayloadAttestationMessage
|
||||
)
|
||||
|
||||
// topic is a struct representing a single gossipsub topic.
|
||||
@@ -145,7 +141,7 @@ func (s *Service) allTopics() []topic {
|
||||
cfg := params.BeaconConfig()
|
||||
// bellatrix: no special topics; electra: blobs topics handled all together
|
||||
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
|
||||
deneb, fulu, gloas, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.GloasForkEpoch, cfg.FarFutureEpoch
|
||||
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
|
||||
// Templates are starter topics - they have a placeholder digest and the subnet is set to the maximum value
|
||||
// for the subnet (see how this is used in allSubnetsBelow). These are not directly returned by the method,
|
||||
// they are copied and modified for each digest where they apply based on the start and end epochs.
|
||||
@@ -162,7 +158,6 @@ func (s *Service) allTopics() []topic {
|
||||
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
|
||||
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
|
||||
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
|
||||
newTopic(gloas, future, empty, GossipPayloadAttestationMessage),
|
||||
}
|
||||
last := params.GetNetworkScheduleEntry(genesis)
|
||||
schedule := []params.NetworkScheduleEntry{last}
|
||||
|
||||
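The topic constants in the hunk above are format strings: a shared protocol-and-digest prefix plus a message name, with subnet topics adding a numeric suffix. The sketch below shows how a concrete topic string could be produced for a fork digest and subnet index; the shape of the shared prefix ("/eth2/%x/") is an assumption, since that constant is not part of this hunk.

// Illustrative only: expanding a topic format into a concrete gossip topic.
// The prefix shape is an assumption; the real constant is defined elsewhere.
package main

import "fmt"

const (
	gossipProtocolAndDigest     = "/eth2/%x/"
	gossipDataColumnSidecar     = "data_column_sidecar"
	dataColumnSubnetTopicFormat = gossipProtocolAndDigest + gossipDataColumnSidecar + "_%d"
)

func main() {
	digest := [4]byte{0x01, 0x02, 0x03, 0x04}
	subnet := uint64(7)
	// Prints something like "/eth2/01020304/data_column_sidecar_7".
	fmt.Println(fmt.Sprintf(dataColumnSubnetTopicFormat, digest, subnet))
}
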
@@ -86,6 +86,7 @@ func TestGetSpec(t *testing.T) {
	config.GloasForkEpoch = 110
	config.BLSWithdrawalPrefixByte = byte('b')
	config.ETH1AddressWithdrawalPrefixByte = byte('c')
	config.BuilderWithdrawalPrefixByte = byte('e')
	config.GenesisDelay = 24
	config.SecondsPerSlot = 25
	config.SlotDurationMilliseconds = 120

@@ -1,24 +1,51 @@
package state

import (
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

type writeOnlyGloasFields interface {
	// Bids.
	SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error

	// Builder pending payments / withdrawals.
	SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
	ClearBuilderPendingPayment(index primitives.Slot) error
	QueueBuilderPayment() error
	RotateBuilderPendingPayments() error
	AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error

	// Execution payload availability.
	UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error

	// Misc.
	SetLatestBlockHash(hash [32]byte) error
	SetExecutionPayloadAvailability(index primitives.Slot, available bool) error

	// Builders.
	IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error
	AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error
}

type readOnlyGloasFields interface {
	// Bids.
	LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error)

	// Builder pending payments / withdrawals.
	BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
	WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error)

	// Misc.
	LatestBlockHash() ([32]byte, error)

	// Builders.
	Builder(index primitives.BuilderIndex) (*ethpb.Builder, error)
	BuilderPubkey(primitives.BuilderIndex) ([48]byte, error)
	BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool)
	IsActiveBuilder(primitives.BuilderIndex) (bool, error)
	CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
	LatestBlockHash() ([32]byte, error)
	BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
}

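A minimal sketch of how a caller might combine the builder read accessors declared above to vet a bid. The builderReader interface and canAcceptBid helper are assumptions introduced for illustration; they are not part of this change.

// Illustrative only: vetting a builder bid with the read-only accessors above.
// The narrowed interface and helper name are assumptions made for this sketch.
package state

import (
	"errors"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// builderReader is the subset of readOnlyGloasFields this sketch needs.
type builderReader interface {
	BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool)
	IsActiveBuilder(primitives.BuilderIndex) (bool, error)
	CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
}

// canAcceptBid reports whether a bid signed by the given builder pubkey could be
// accepted: the builder must be known, active, and solvent for the bid value.
func canAcceptBid(st builderReader, pubkey [fieldparams.BLSPubkeyLength]byte, value primitives.Gwei) (bool, error) {
	idx, ok := st.BuilderIndexByPubkey(pubkey)
	if !ok {
		return false, errors.New("unknown builder pubkey")
	}
	active, err := st.IsActiveBuilder(idx)
	if err != nil || !active {
		return false, err
	}
	return st.CanBuilderCoverBid(idx, value)
}
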
@@ -1,11 +1,15 @@
package state_native

import (
	"bytes"
	"fmt"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
)
@@ -156,3 +160,83 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,

	return b.builderPendingPaymentsVal(), nil
}

// LatestExecutionPayloadBid returns the cached latest execution payload bid for Gloas.
func (b *BeaconState) LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error) {
	if b.version < version.Gloas {
		return nil, errNotSupported("LatestExecutionPayloadBid", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	if b.latestExecutionPayloadBid == nil {
		return nil, nil
	}

	return blocks.WrappedROExecutionPayloadBid(b.latestExecutionPayloadBid.Copy())
}

// WithdrawalsMatchPayloadExpected returns true if the given withdrawals root matches the state's
// payload_expected_withdrawals root.
func (b *BeaconState) WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error) {
	if b.version < version.Gloas {
		return false, errNotSupported("WithdrawalsMatchPayloadExpected", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	return withdrawalsEqual(withdrawals, b.payloadExpectedWithdrawals), nil
}

func withdrawalsEqual(a, b []*enginev1.Withdrawal) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		wa := a[i]
		wb := b[i]
		if wa.Index != wb.Index ||
			wa.ValidatorIndex != wb.ValidatorIndex ||
			wa.Amount != wb.Amount ||
			!bytes.Equal(wa.Address, wb.Address) {
			return false
		}
	}
	return true
}

// Builder returns the builder at the given index.
func (b *BeaconState) Builder(index primitives.BuilderIndex) (*ethpb.Builder, error) {
	b.lock.RLock()
	defer b.lock.RUnlock()

	if b.builders == nil {
		return nil, nil
	}
	if uint64(index) >= uint64(len(b.builders)) {
		return nil, fmt.Errorf("builder index %d out of bounds", index)
	}
	if b.builders[index] == nil {
		return nil, nil
	}

	return ethpb.CopyBuilder(b.builders[index]), nil
}

// BuilderIndexByPubkey returns the builder index for the given pubkey, if present.
func (b *BeaconState) BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool) {
	b.lock.RLock()
	defer b.lock.RUnlock()

	for i, builder := range b.builders {
		if builder == nil {
			continue
		}
		if bytes.Equal(builder.Pubkey, pubkey[:]) {
			return primitives.BuilderIndex(i), true
		}
	}
	return 0, false
}

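A minimal sketch of the intended call pattern for WithdrawalsMatchPayloadExpected: compare an incoming payload's withdrawals against the list the state expects and reject on mismatch. The wrapper below, its narrowed interface, and its error text are assumptions for illustration, not code from this change.

// Illustrative only: rejecting a payload whose withdrawals differ from the
// state's expected list. Helper and interface names are assumptions.
package state_native

import (
	"errors"

	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
)

// expectedWithdrawalsChecker is the one method of the Gloas state this sketch needs.
type expectedWithdrawalsChecker interface {
	WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error)
}

// verifyPayloadWithdrawals returns an error if the payload's withdrawals do not
// match, element by element, the list the state committed to.
func verifyPayloadWithdrawals(st expectedWithdrawalsChecker, withdrawals []*enginev1.Withdrawal) error {
	ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("payload withdrawals do not match expected withdrawals")
	}
	return nil
}
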
@@ -5,8 +5,10 @@ import (
|
||||
"testing"
|
||||
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
@@ -44,6 +46,17 @@ func TestLatestBlockHash(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestLatestExecutionPayloadBid(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, _ := util.DeterministicGenesisState(t, 1)
|
||||
native, ok := stIface.(*state_native.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
_, err := native.LatestExecutionPayloadBid()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderPubkey(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, _ := util.DeterministicGenesisState(t, 1)
|
||||
@@ -166,3 +179,132 @@ func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
||||
_, err = st.BuilderPendingPayments()
|
||||
require.ErrorContains(t, "BuilderPendingPayments", err)
|
||||
}
|
||||
|
||||
func TestWithdrawalsMatchPayloadExpected(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, _ := util.DeterministicGenesisState(t, 1)
|
||||
native, ok := stIface.(*state_native.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
_, err := native.WithdrawalsMatchPayloadExpected(nil)
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("returns true when roots match", func(t *testing.T) {
|
||||
withdrawals := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
PayloadExpectedWithdrawals: withdrawals,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
|
||||
t.Run("returns false when roots do not match", func(t *testing.T) {
|
||||
expected := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
|
||||
}
|
||||
actual := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 11},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
PayloadExpectedWithdrawals: expected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
ok, err := st.WithdrawalsMatchPayloadExpected(actual)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilder(t *testing.T) {
|
||||
t.Run("nil builders returns nil", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: nil,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := st.Builder(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, (*ethpb.Builder)(nil), got)
|
||||
})
|
||||
|
||||
t.Run("out of bounds returns error", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{{}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = st.Builder(1)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
})
|
||||
|
||||
t.Run("returns copy", func(t *testing.T) {
|
||||
pubkey := bytes.Repeat([]byte{0xAA}, fieldparams.BLSPubkeyLength)
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{
|
||||
Pubkey: pubkey,
|
||||
Balance: 42,
|
||||
DepositEpoch: 3,
|
||||
WithdrawableEpoch: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got1, err := st.Builder(0)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, (*ethpb.Builder)(nil), got1)
|
||||
require.Equal(t, primitives.Gwei(42), got1.Balance)
|
||||
require.DeepEqual(t, pubkey, got1.Pubkey)
|
||||
|
||||
// Mutate returned builder; state should be unchanged.
|
||||
got1.Pubkey[0] = 0xFF
|
||||
got2, err := st.Builder(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, byte(0xAA), got2.Pubkey[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderIndexByPubkey(t *testing.T) {
|
||||
t.Run("not found returns false", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{Pubkey: bytes.Repeat([]byte{0x11}, fieldparams.BLSPubkeyLength)},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var pk [fieldparams.BLSPubkeyLength]byte
|
||||
copy(pk[:], bytes.Repeat([]byte{0x22}, fieldparams.BLSPubkeyLength))
|
||||
idx, ok := st.BuilderIndexByPubkey(pk)
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, primitives.BuilderIndex(0), idx)
|
||||
})
|
||||
|
||||
t.Run("skips nil entries and finds match", func(t *testing.T) {
|
||||
wantIdx := primitives.BuilderIndex(1)
|
||||
wantPkBytes := bytes.Repeat([]byte{0xAB}, fieldparams.BLSPubkeyLength)
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
nil,
|
||||
{Pubkey: wantPkBytes},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var pk [fieldparams.BLSPubkeyLength]byte
|
||||
copy(pk[:], wantPkBytes)
|
||||
idx, ok := st.BuilderIndexByPubkey(pk)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, wantIdx, idx)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,11 +5,14 @@ import (

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time/slots"
)

// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -121,6 +124,41 @@ func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
	return nil
}

// QueueBuilderPayment implements the builder payment queuing logic for Gloas.
// Spec v1.7.0-alpha.0 (pseudocode):
//
//	payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
//	amount = payment.withdrawal.amount
//	if amount > 0:
//	    state.builder_pending_withdrawals.append(payment.withdrawal)
//	state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = BuilderPendingPayment()
func (b *BeaconState) QueueBuilderPayment() error {
	if b.version < version.Gloas {
		return errNotSupported("QueueBuilderPayment", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	slot := b.slot
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
	if uint64(paymentIndex) >= uint64(len(b.builderPendingPayments)) {
		return fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentIndex, len(b.builderPendingPayments))
	}

	payment := b.builderPendingPayments[paymentIndex]
	if payment != nil && payment.Withdrawal != nil && payment.Withdrawal.Amount > 0 {
		b.builderPendingWithdrawals = append(b.builderPendingWithdrawals, ethpb.CopyBuilderPendingWithdrawal(payment.Withdrawal))
		b.markFieldAsDirty(types.BuilderPendingWithdrawals)
	}

	b.builderPendingPayments[paymentIndex] = emptyBuilderPendingPayment
	b.markFieldAsDirty(types.BuilderPendingPayments)
	return nil
}

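// Illustrative usage sketch (not part of the diff): QueueBuilderPayment is meant to run once
// per slot during Gloas state processing; it promotes the payment stored at
// SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH into builder_pending_withdrawals when its amount
// is non-zero and then resets that queue entry. The transition call site below is an
// assumption, not code from this change.
//
//	if st.Version() >= version.Gloas {
//		if err := st.QueueBuilderPayment(); err != nil {
//			return errors.Wrap(err, "could not queue builder payment")
//		}
//	}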
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
	if b.version < version.Gloas {
@@ -161,3 +199,129 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
	b.markFieldAsDirty(types.ExecutionPayloadAvailability)
	return nil
}

// SetLatestBlockHash sets the latest execution block hash.
func (b *BeaconState) SetLatestBlockHash(hash [32]byte) error {
	if b.version < version.Gloas {
		return errNotSupported("SetLatestBlockHash", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	b.latestBlockHash = hash[:]
	b.markFieldAsDirty(types.LatestBlockHash)
	return nil
}

// SetExecutionPayloadAvailability sets the execution payload availability bit for a specific slot.
func (b *BeaconState) SetExecutionPayloadAvailability(index primitives.Slot, available bool) error {
	if b.version < version.Gloas {
		return errNotSupported("SetExecutionPayloadAvailability", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	bitIndex := index % params.BeaconConfig().SlotsPerHistoricalRoot
	byteIndex := bitIndex / 8
	bitPosition := bitIndex % 8

	if uint64(byteIndex) >= uint64(len(b.executionPayloadAvailability)) {
		return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", bitIndex, byteIndex, len(b.executionPayloadAvailability))
	}

	// Set or clear the bit
	if available {
		b.executionPayloadAvailability[byteIndex] |= 1 << bitPosition
	} else {
		b.executionPayloadAvailability[byteIndex] &^= 1 << bitPosition
	}

	b.markFieldAsDirty(types.ExecutionPayloadAvailability)
	return nil
}

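// Worked example for the bit arithmetic above (illustrative, not part of the diff):
// with SLOTS_PER_HISTORICAL_ROOT = 8192 and index = 8203,
//
//	bitIndex    = 8203 % 8192 = 11
//	byteIndex   = 11 / 8      = 1
//	bitPosition = 11 % 8      = 3
//
// so SetExecutionPayloadAvailability(8203, true) sets bit 3 of byte 1, i.e. it ORs
// executionPayloadAvailability[1] with 0b00001000; passing false clears the same bit.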
// IncreaseBuilderBalance increases the balance of the builder at the given index.
func (b *BeaconState) IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error {
	if b.version < version.Gloas {
		return errNotSupported("IncreaseBuilderBalance", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	if b.builders == nil || uint64(index) >= uint64(len(b.builders)) {
		return fmt.Errorf("builder index %d out of bounds", index)
	}
	if b.builders[index] == nil {
		return fmt.Errorf("builder at index %d is nil", index)
	}

	builders := b.builders
	if b.sharedFieldReferences[types.Builders].Refs() > 1 {
		builders = make([]*ethpb.Builder, len(b.builders))
		copy(builders, b.builders)
		b.sharedFieldReferences[types.Builders].MinusRef()
		b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
	}

	builder := ethpb.CopyBuilder(builders[index])
	builder.Balance += primitives.Gwei(amount)
	builders[index] = builder
	b.builders = builders

	b.markFieldAsDirty(types.Builders)
	return nil
}

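// Illustrative sketch (not part of the diff): the Refs() > 1 branch above gives the builders
// slice copy-on-write semantics, so states produced by Copy() share it only until the first
// mutation. Mirroring the copy-on-write test further down, with an assumed Gloas state `st`:
//
//	copied := st.Copy().(*BeaconState)
//	_ = copied.IncreaseBuilderBalance(0, 5)
//	// `copied` now owns a fresh builders slice; the balance stored in `st` is unchanged.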
// AddBuilderFromDeposit creates or replaces a builder entry derived from a deposit.
func (b *BeaconState) AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error {
	if b.version < version.Gloas {
		return errNotSupported("AddBuilderFromDeposit", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	currentEpoch := slots.ToEpoch(b.slot)
	index := b.builderInsertionIndex(currentEpoch)

	builder := &ethpb.Builder{
		Pubkey:            bytesutil.SafeCopyBytes(pubkey[:]),
		Version:           []byte{withdrawalCredentials[0]},
		ExecutionAddress:  bytesutil.SafeCopyBytes(withdrawalCredentials[12:]),
		Balance:           primitives.Gwei(amount),
		DepositEpoch:      currentEpoch,
		WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
	}

	builders := b.builders
	if b.sharedFieldReferences[types.Builders].Refs() > 1 {
		builders = make([]*ethpb.Builder, len(b.builders))
		copy(builders, b.builders)
		b.sharedFieldReferences[types.Builders].MinusRef()
		b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
	}

	if index < primitives.BuilderIndex(len(builders)) {
		builders[index] = builder
	} else {
		gap := index - primitives.BuilderIndex(len(builders)) + 1
		builders = append(builders, make([]*ethpb.Builder, gap)...)
		builders[index] = builder
	}
	b.builders = builders

	b.markFieldAsDirty(types.Builders)
	return nil
}

func (b *BeaconState) builderInsertionIndex(currentEpoch primitives.Epoch) primitives.BuilderIndex {
	for i, builder := range b.builders {
		if builder.WithdrawableEpoch <= currentEpoch && builder.Balance == 0 {
			return primitives.BuilderIndex(i)
		}
	}
	return primitives.BuilderIndex(len(b.builders))
}

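// Illustrative sketch (not part of the diff): builderInsertionIndex recycles the first slot
// whose builder is already withdrawable and fully drained; only when no such slot exists does
// AddBuilderFromDeposit grow the list. Assuming a Gloas state `st` whose single builder has
// WithdrawableEpoch <= the current epoch and Balance == 0:
//
//	_ = st.AddBuilderFromDeposit(pubkey, withdrawalCredentials, 32_000_000_000)
//	// len(st.builders) is still 1: index 0 was reused instead of appending a new entry.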
@@ -166,7 +166,7 @@ func TestClearBuilderPendingPayment(t *testing.T) {
|
||||
}
|
||||
|
||||
require.NoError(t, st.ClearBuilderPendingPayment(1))
|
||||
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
|
||||
require.DeepEqual(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
||||
})
|
||||
|
||||
@@ -184,6 +184,80 @@ func TestClearBuilderPendingPayment(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestQueueBuilderPayment(t *testing.T) {
|
||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.QueueBuilderPayment()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("appends withdrawal, clears payment, and marks dirty", func(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
slot := primitives.Slot(3)
|
||||
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: slot,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference),
|
||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
}
|
||||
st.builderPendingPayments[paymentIndex] = ðpb.BuilderPendingPayment{
|
||||
Weight: 1,
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
|
||||
Amount: 99,
|
||||
BuilderIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.QueueBuilderPayment())
|
||||
require.DeepEqual(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
|
||||
require.Equal(t, 1, len(st.builderPendingWithdrawals))
|
||||
require.DeepEqual(t, bytes.Repeat([]byte{0xAB}, 20), st.builderPendingWithdrawals[0].FeeRecipient)
|
||||
require.Equal(t, primitives.Gwei(99), st.builderPendingWithdrawals[0].Amount)
|
||||
|
||||
// Ensure copied withdrawal is not aliased.
|
||||
st.builderPendingPayments[paymentIndex].Withdrawal.FeeRecipient[0] = 0x01
|
||||
require.Equal(t, byte(0xAB), st.builderPendingWithdrawals[0].FeeRecipient[0])
|
||||
})
|
||||
|
||||
t.Run("zero amount does not append withdrawal", func(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
slot := primitives.Slot(3)
|
||||
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: slot,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference),
|
||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
}
|
||||
st.builderPendingPayments[paymentIndex] = ðpb.BuilderPendingPayment{
|
||||
Weight: 1,
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
|
||||
Amount: 0,
|
||||
BuilderIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.QueueBuilderPayment())
|
||||
require.DeepEqual(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
||||
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
|
||||
require.Equal(t, 0, len(st.builderPendingWithdrawals))
|
||||
})
|
||||
}
|
||||
|
||||
func TestRotateBuilderPendingPayments(t *testing.T) {
|
||||
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
|
||||
@@ -331,3 +405,241 @@ func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconSta
|
||||
|
||||
return st.(*BeaconState)
|
||||
}
|
||||
|
||||
func TestSetLatestBlockHash(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
var hash [32]byte
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.SetLatestBlockHash(hash)
|
||||
require.ErrorContains(t, "SetLatestBlockHash", err)
|
||||
})
|
||||
|
||||
var hash [32]byte
|
||||
copy(hash[:], []byte("latest-block-hash"))
|
||||
|
||||
state := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
|
||||
require.NoError(t, state.SetLatestBlockHash(hash))
|
||||
require.Equal(t, true, state.dirtyFields[types.LatestBlockHash])
|
||||
require.DeepEqual(t, hash[:], state.latestBlockHash)
|
||||
}
|
||||
|
||||
func TestSetExecutionPayloadAvailability(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.SetExecutionPayloadAvailability(0, true)
|
||||
require.ErrorContains(t, "SetExecutionPayloadAvailability", err)
|
||||
})
|
||||
|
||||
state := &BeaconState{
|
||||
version: version.Gloas,
|
||||
executionPayloadAvailability: make([]byte, params.BeaconConfig().SlotsPerHistoricalRoot/8),
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
|
||||
slot := primitives.Slot(10)
|
||||
bitIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
byteIndex := bitIndex / 8
|
||||
bitPosition := bitIndex % 8
|
||||
|
||||
require.NoError(t, state.SetExecutionPayloadAvailability(slot, true))
|
||||
require.Equal(t, true, state.dirtyFields[types.ExecutionPayloadAvailability])
|
||||
require.Equal(t, byte(1<<bitPosition), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
|
||||
|
||||
require.NoError(t, state.SetExecutionPayloadAvailability(slot, false))
|
||||
require.Equal(t, byte(0), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
|
||||
}
|
||||
|
||||
func TestSetExecutionPayloadAvailability_OutOfRange(t *testing.T) {
|
||||
state := &BeaconState{
|
||||
version: version.Gloas,
|
||||
executionPayloadAvailability: []byte{},
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
|
||||
err := state.SetExecutionPayloadAvailability(0, true)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
require.Equal(t, false, state.dirtyFields[types.ExecutionPayloadAvailability])
|
||||
}
|
||||
|
||||
func TestIncreaseBuilderBalance(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.IncreaseBuilderBalance(0, 1)
|
||||
require.ErrorContains(t, "IncreaseBuilderBalance", err)
|
||||
})
|
||||
|
||||
t.Run("out of bounds returns error", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
||||
types.Builders: stateutil.NewRef(1),
|
||||
},
|
||||
builders: []*ethpb.Builder{},
|
||||
}
|
||||
|
||||
err := st.IncreaseBuilderBalance(0, 1)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
require.Equal(t, false, st.dirtyFields[types.Builders])
|
||||
})
|
||||
|
||||
t.Run("nil builder returns error", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
||||
types.Builders: stateutil.NewRef(1),
|
||||
},
|
||||
builders: []*ethpb.Builder{nil},
|
||||
}
|
||||
|
||||
err := st.IncreaseBuilderBalance(0, 1)
|
||||
require.ErrorContains(t, "is nil", err)
|
||||
require.Equal(t, false, st.dirtyFields[types.Builders])
|
||||
})
|
||||
|
||||
t.Run("increments and marks dirty", func(t *testing.T) {
|
||||
orig := ðpb.Builder{Balance: 10}
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
||||
types.Builders: stateutil.NewRef(1),
|
||||
},
|
||||
builders: []*ethpb.Builder{orig},
|
||||
}
|
||||
|
||||
require.NoError(t, st.IncreaseBuilderBalance(0, 5))
|
||||
require.Equal(t, primitives.Gwei(15), st.builders[0].Balance)
|
||||
require.Equal(t, true, st.dirtyFields[types.Builders])
|
||||
// Copy-on-write semantics: builder pointer replaced.
|
||||
require.NotEqual(t, orig, st.builders[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestIncreaseBuilderBalance_CopyOnWrite(t *testing.T) {
|
||||
orig := ðpb.Builder{Balance: 10}
|
||||
statePb, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{orig},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
st, ok := statePb.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
copied := st.Copy().(*BeaconState)
|
||||
require.Equal(t, uint(2), st.sharedFieldReferences[types.Builders].Refs())
|
||||
|
||||
require.NoError(t, copied.IncreaseBuilderBalance(0, 5))
|
||||
require.Equal(t, primitives.Gwei(10), st.builders[0].Balance)
|
||||
require.Equal(t, primitives.Gwei(15), copied.builders[0].Balance)
|
||||
require.Equal(t, uint(1), st.sharedFieldReferences[types.Builders].Refs())
|
||||
require.Equal(t, uint(1), copied.sharedFieldReferences[types.Builders].Refs())
|
||||
}
|
||||
|
||||
func TestAddBuilderFromDeposit(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
var pubkey [48]byte
|
||||
var wc [32]byte
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.AddBuilderFromDeposit(pubkey, wc, 1)
|
||||
require.ErrorContains(t, "AddBuilderFromDeposit", err)
|
||||
})
|
||||
|
||||
t.Run("reuses empty withdrawable slot", func(t *testing.T) {
|
||||
var pubkey [48]byte
|
||||
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
|
||||
var wc [32]byte
|
||||
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
|
||||
wc[0] = 0x42 // version byte
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: 0, // epoch 0
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
||||
types.Builders: stateutil.NewRef(1),
|
||||
},
|
||||
builders: []*ethpb.Builder{
|
||||
{
|
||||
WithdrawableEpoch: 0,
|
||||
Balance: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 123))
|
||||
require.Equal(t, 1, len(st.builders))
|
||||
got := st.builders[0]
|
||||
require.NotNil(t, got)
|
||||
require.DeepEqual(t, pubkey[:], got.Pubkey)
|
||||
require.DeepEqual(t, []byte{0x42}, got.Version)
|
||||
require.DeepEqual(t, wc[12:], got.ExecutionAddress)
|
||||
require.Equal(t, primitives.Gwei(123), got.Balance)
|
||||
require.Equal(t, primitives.Epoch(0), got.DepositEpoch)
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, got.WithdrawableEpoch)
|
||||
require.Equal(t, true, st.dirtyFields[types.Builders])
|
||||
})
|
||||
|
||||
t.Run("appends new builder when no reusable slot", func(t *testing.T) {
|
||||
var pubkey [48]byte
|
||||
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
|
||||
var wc [32]byte
|
||||
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: 0,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
||||
types.Builders: stateutil.NewRef(1),
|
||||
},
|
||||
builders: []*ethpb.Builder{
|
||||
{
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Balance: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 5))
|
||||
require.Equal(t, 2, len(st.builders))
|
||||
require.NotNil(t, st.builders[1])
|
||||
require.Equal(t, primitives.Gwei(5), st.builders[1].Balance)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddBuilderFromDeposit_CopyOnWrite(t *testing.T) {
|
||||
var pubkey [48]byte
|
||||
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
|
||||
var wc [32]byte
|
||||
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
|
||||
wc[0] = 0x42 // version byte
|
||||
|
||||
statePb, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
Slot: 0,
|
||||
Builders: []*ethpb.Builder{
|
||||
{
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Balance: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
st, ok := statePb.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
copied := st.Copy().(*BeaconState)
|
||||
require.Equal(t, uint(2), st.sharedFieldReferences[types.Builders].Refs())
|
||||
|
||||
require.NoError(t, copied.AddBuilderFromDeposit(pubkey, wc, 5))
|
||||
require.Equal(t, 1, len(st.builders))
|
||||
require.Equal(t, 2, len(copied.builders))
|
||||
require.Equal(t, uint(1), st.sharedFieldReferences[types.Builders].Refs())
|
||||
require.Equal(t, uint(1), copied.sharedFieldReferences[types.Builders].Refs())
|
||||
}
|
||||
|
||||
@@ -47,7 +47,6 @@ go_library(
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_payload_attestation.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
"subscription_topic_handler.go",
|
||||
@@ -59,7 +58,6 @@ go_library(
|
||||
"validate_bls_to_execution_change.go",
|
||||
"validate_data_column.go",
|
||||
"validate_light_client.go",
|
||||
"validate_payload_attestation.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
"validate_sync_contribution_proof.go",
|
||||
@@ -116,7 +114,6 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/payload-attestation:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
@@ -215,7 +212,6 @@ go_test(
|
||||
"validate_bls_to_execution_change_test.go",
|
||||
"validate_data_column_test.go",
|
||||
"validate_light_client_test.go",
|
||||
"validate_payload_attestation_test.go",
|
||||
"validate_proposer_slashing_test.go",
|
||||
"validate_sync_committee_message_test.go",
|
||||
"validate_sync_contribution_proof_test.go",
|
||||
@@ -268,7 +264,6 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/payload-attestation:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
|
||||
@@ -207,13 +207,6 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func WithPayloadAttestationCache(c *cache.PayloadAttestationCache) Option {
|
||||
return func(s *Service) error {
|
||||
s.payloadAttestationCache = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSlasherEnabled configures the sync package to support slashing detection.
|
||||
func WithSlasherEnabled(enabled bool) Option {
|
||||
return func(s *Service) error {
|
||||
|
||||
@@ -38,7 +38,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/rand"
|
||||
@@ -122,7 +121,6 @@ type blockchainService interface {
|
||||
blockchain.FinalizationFetcher
|
||||
blockchain.ForkFetcher
|
||||
blockchain.AttestationReceiver
|
||||
blockchain.PayloadAttestationReceiver
|
||||
blockchain.TimeFetcher
|
||||
blockchain.GenesisFetcher
|
||||
blockchain.CanonicalFetcher
|
||||
@@ -175,7 +173,6 @@ type Service struct {
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
newColumnsVerifier verification.NewDataColumnsVerifier
|
||||
newPayloadAttestationVerifier verification.NewPayloadAttestationMsgVerifier
|
||||
columnSidecarsExecSingleFlight singleflight.Group
|
||||
reconstructionSingleFlight singleflight.Group
|
||||
availableBlocker coverage.AvailableBlocker
|
||||
@@ -185,7 +182,6 @@ type Service struct {
|
||||
slasherEnabled bool
|
||||
lcStore *lightClient.Store
|
||||
dataColumnLogCh chan dataColumnLogEntry
|
||||
payloadAttestationCache *cache.PayloadAttestationCache
|
||||
digestActions perDigestSet
|
||||
subscriptionSpawner func(func()) // see Service.spawn for details
|
||||
}
|
||||
@@ -194,16 +190,15 @@ type Service struct {
|
||||
func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
chainStarted: abool.New(),
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]any),
|
||||
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
|
||||
reconstructionRandGen: rand.NewGenerator(),
|
||||
payloadAttestationCache: &cache.PayloadAttestationCache{},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
chainStarted: abool.New(),
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]any),
|
||||
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
|
||||
reconstructionRandGen: rand.NewGenerator(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
@@ -255,12 +250,6 @@ func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verifi
|
||||
}
|
||||
}
|
||||
|
||||
func newPayloadAttestationMessageFromInitializer(ini *verification.Initializer) verification.NewPayloadAttestationMsgVerifier {
|
||||
return func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return ini.NewPayloadAttestationMsgVerifier(pa, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the regular sync service.
|
||||
func (s *Service) Start() {
|
||||
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
|
||||
@@ -270,7 +259,6 @@ func (s *Service) Start() {
|
||||
}
|
||||
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
||||
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
|
||||
s.newPayloadAttestationVerifier = newPayloadAttestationMessageFromInitializer(v)
|
||||
|
||||
go s.verifierRoutine()
|
||||
go s.startDiscoveryAndSubscriptions()
|
||||
|
||||
@@ -330,18 +330,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Gloas.
|
||||
if params.BeaconConfig().GloasForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.PayloadAttestationMessageTopicFormat,
|
||||
s.validatePayloadAttestation,
|
||||
s.payloadAttestationSubscriber,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) payloadAttestationSubscriber(ctx context.Context, msg proto.Message) error {
|
||||
a, ok := msg.(*eth.PayloadAttestationMessage)
|
||||
if !ok {
|
||||
return errWrongMessage
|
||||
}
|
||||
|
||||
if err := s.payloadAttestationCache.Add(a.Data.Slot, a.ValidatorIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.cfg.chain.ReceivePayloadAttestationMessage(ctx, a)
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
errAlreadySeenPayloadAttestation = errors.New("payload attestation already seen for validator index")
|
||||
)
|
||||
|
||||
func (s *Service) validatePayloadAttestation(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validatePayloadAttestation")
|
||||
defer span.End()
|
||||
|
||||
if msg.Topic == nil {
|
||||
return pubsub.ValidationReject, p2p.ErrInvalidTopic
|
||||
}
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
att, ok := m.(*eth.PayloadAttestationMessage)
|
||||
if !ok {
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
pa, err := payloadattestation.NewReadOnly(att)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
v := s.newPayloadAttestationVerifier(pa, verification.GossipPayloadAttestationMessageRequirements)
|
||||
|
||||
// [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance),
|
||||
// i.e. data.slot == current_slot.
|
||||
if err := v.VerifyCurrentSlot(); err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [IGNORE] The payload_attestation_message is the first valid message received from the validator with
|
||||
// index payload_attestation_message.validator_index.
|
||||
if s.payloadAttestationCache.Seen(pa.Slot(), pa.ValidatorIndex()) {
|
||||
return pubsub.ValidationIgnore, errAlreadySeenPayloadAttestation
|
||||
}
|
||||
|
||||
// [IGNORE] The message's block data.beacon_block_root has been seen (via gossip or non-gossip sources)
|
||||
// (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after).
|
||||
if err := v.VerifyBlockRootSeen(s.cfg.chain.InForkchoice); err != nil {
|
||||
// TODO: queue attestation
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [REJECT] The message's block data.beacon_block_root passes validation.
|
||||
if err := v.VerifyBlockRootValid(s.hasBadBlock); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
st, err := s.getPtcState(ctx, pa)
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [REJECT] The message's validator index is within the payload committee in get_ptc(state, data.slot).
|
||||
// The state is the head state corresponding to processing the block up to the current slot.
|
||||
if err := v.VerifyValidatorInPTC(ctx, st); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// [REJECT] payload_attestation_message.signature is valid with respect to the validator's public key.
|
||||
if err := v.VerifySignature(st); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
msg.ValidatorData = att
|
||||
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) getPtcState(ctx context.Context, pa payloadattestation.ROMessage) (state.ReadOnlyBeaconState, error) {
|
||||
blockRoot := pa.BeaconBlockRoot()
|
||||
blockSlot := pa.Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
headSlot := s.cfg.chain.HeadSlot()
|
||||
headEpoch := slots.ToEpoch(headSlot)
|
||||
headRoot, err := s.cfg.chain.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if blockEpoch == headEpoch {
|
||||
if bytes.Equal(blockRoot[:], headRoot) {
|
||||
return s.cfg.chain.HeadStateReadOnly(ctx)
|
||||
}
|
||||
|
||||
headDependent, err := s.cfg.chain.DependentRootForEpoch(bytesutil.ToBytes32(headRoot), blockEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockDependent, err := s.cfg.chain.DependentRootForEpoch(blockRoot, blockEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if bytes.Equal(headDependent[:], blockDependent[:]) {
|
||||
return s.cfg.chain.HeadStateReadOnly(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
headState, err := s.cfg.chain.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, blockSlot)
|
||||
}
|
||||
@@ -1,165 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestValidatePayloadAttestationMessage_IncorrectTopic(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
|
||||
s := &Service{
|
||||
payloadAttestationCache: &cache.PayloadAttestationCache{},
|
||||
cfg: &config{chain: chainService, p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
|
||||
msg := util.HydratePayloadAttestation(ðpb.PayloadAttestation{}) // Using payload attestation for message should fail.
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestation]()]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestToTopic(topic, digest)
|
||||
|
||||
result, err := s.validatePayloadAttestation(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.ErrorContains(t, "extraction failed for topic", err)
|
||||
require.Equal(t, result, pubsub.ValidationReject)
|
||||
}
|
||||
|
||||
func TestValidatePayloadAttestationMessage_ErrorPathsWithMock(t *testing.T) {
|
||||
tests := []struct {
|
||||
error error
|
||||
verifier verification.NewPayloadAttestationMsgVerifier
|
||||
result pubsub.ValidationResult
|
||||
}{
|
||||
{
|
||||
error: errors.New("incorrect slot"),
|
||||
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return &verification.MockPayloadAttestation{ErrIncorrectPayloadAttSlot: errors.New("incorrect slot")}
|
||||
},
|
||||
result: pubsub.ValidationIgnore,
|
||||
},
|
||||
{
|
||||
error: errors.New("block root seen"),
|
||||
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return &verification.MockPayloadAttestation{ErrPayloadAttBlockRootNotSeen: errors.New("block root seen")}
|
||||
},
|
||||
result: pubsub.ValidationIgnore,
|
||||
},
|
||||
{
|
||||
error: errors.New("block root invalid"),
|
||||
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return &verification.MockPayloadAttestation{ErrPayloadAttBlockRootInvalid: errors.New("block root invalid")}
|
||||
},
|
||||
result: pubsub.ValidationReject,
|
||||
},
|
||||
{
|
||||
error: errors.New("validator not in PTC"),
|
||||
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return &verification.MockPayloadAttestation{ErrIncorrectPayloadAttValidator: errors.New("validator not in PTC")}
|
||||
},
|
||||
result: pubsub.ValidationReject,
|
||||
},
|
||||
{
|
||||
error: errors.New("incorrect signature"),
|
||||
verifier: func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return &verification.MockPayloadAttestation{ErrInvalidMessageSignature: errors.New("incorrect signature")}
|
||||
},
|
||||
result: pubsub.ValidationReject,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.error.Error(), func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
|
||||
s := &Service{
|
||||
payloadAttestationCache: &cache.PayloadAttestationCache{},
|
||||
cfg: &config{chain: chainService, p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
s.newPayloadAttestationVerifier = tt.verifier
|
||||
|
||||
msg := newPayloadAttestationMessage()
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestToTopic(topic, digest)
|
||||
|
||||
result, err := s.validatePayloadAttestation(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
|
||||
require.ErrorContains(t, tt.error.Error(), err)
|
||||
require.Equal(t, result, tt.result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePayloadAttestationMessage_Accept(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
|
||||
s := &Service{
|
||||
payloadAttestationCache: &cache.PayloadAttestationCache{},
|
||||
cfg: &config{chain: chainService, p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
|
||||
s.newPayloadAttestationVerifier = func(pa payloadattestation.ROMessage, reqs []verification.Requirement) verification.PayloadAttestationMsgVerifier {
|
||||
return &verification.MockPayloadAttestation{}
|
||||
}
|
||||
|
||||
msg := newPayloadAttestationMessage()
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()]
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = s.addDigestToTopic(topic, digest)
|
||||
|
||||
result, err := s.validatePayloadAttestation(ctx, "", &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, result, pubsub.ValidationAccept)
|
||||
}
|
||||
|
||||
func newPayloadAttestationMessage() *ethpb.PayloadAttestationMessage {
|
||||
return ðpb.PayloadAttestationMessage{
|
||||
ValidatorIndex: 0,
|
||||
Data: util.HydratePayloadAttestationData(ðpb.PayloadAttestationData{Slot: 1}),
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
}
|
||||
@@ -15,16 +15,12 @@ go_library(
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"mock.go",
|
||||
"payload_attestation.go",
|
||||
"payload_attestation_mock.go",
|
||||
"requirements.go",
|
||||
"result.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/verification",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/core/gloas:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
@@ -36,7 +32,6 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/payload-attestation:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
@@ -62,14 +57,12 @@ go_test(
|
||||
"data_column_test.go",
|
||||
"filesystem_test.go",
|
||||
"initializer_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"result_test.go",
|
||||
"verification_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -80,10 +73,8 @@ go_test(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/payload-attestation:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
|
||||
@@ -14,6 +14,24 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
RequireBlobIndexInBounds Requirement = iota
|
||||
RequireNotFromFutureSlot
|
||||
RequireSlotAboveFinalized
|
||||
RequireValidProposerSignature
|
||||
RequireSidecarParentSeen
|
||||
RequireSidecarParentValid
|
||||
RequireSidecarParentSlotLower
|
||||
RequireSidecarDescendsFromFinalized
|
||||
RequireSidecarInclusionProven
|
||||
RequireSidecarKzgProofVerified
|
||||
RequireSidecarProposerExpected
|
||||
|
||||
// Data columns specific.
|
||||
RequireValidFields
|
||||
RequireCorrectSubnet
|
||||
)
|
||||
|
||||
var allBlobSidecarRequirements = []Requirement{
|
||||
RequireBlobIndexInBounds,
|
||||
RequireNotFromFutureSlot,
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"golang.org/x/sync/singleflight"
|
||||
@@ -87,16 +86,6 @@ func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColu
|
||||
}
|
||||
}
|
||||
|
||||
// NewPayloadAttestationMsgVerifier creates a PayloadAttestationMsgVerifier for a single payload attestation message,
|
||||
// with the given set of requirements.
|
||||
func (ini *Initializer) NewPayloadAttestationMsgVerifier(pa payloadattestation.ROMessage, reqs []Requirement) *PayloadAttMsgVerifier {
|
||||
return &PayloadAttMsgVerifier{
|
||||
sharedResources: ini.shared,
|
||||
results: newResults(reqs...),
|
||||
pa: pa,
|
||||
}
|
||||
}
|
||||
|
||||
// InitializerWaiter provides an Initializer once all dependent resources are ready
|
||||
// via the WaitForInitializer method.
|
||||
type InitializerWaiter struct {
|
||||
|
||||
@@ -3,10 +3,8 @@ package verification
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
)
|
||||
|
||||
// BlobVerifier defines the methods implemented by the ROBlobVerifier.
|
||||
@@ -56,18 +54,3 @@ type DataColumnsVerifier interface {
|
||||
// NewDataColumnsVerifier is a function signature that can be used to mock a setup where a
|
||||
// column verifier can be easily initialized.
|
||||
type NewDataColumnsVerifier func(dataColumns []blocks.RODataColumn, reqs []Requirement) DataColumnsVerifier
|
||||
|
||||
// PayloadAttestationMsgVerifier defines the methods implemented by the ROPayloadAttestation.
|
||||
type PayloadAttestationMsgVerifier interface {
|
||||
VerifyCurrentSlot() error
|
||||
VerifyBlockRootSeen(blockRootSeen func([32]byte) bool) error
|
||||
VerifyBlockRootValid(func([32]byte) bool) error
|
||||
VerifyValidatorInPTC(context.Context, state.ReadOnlyBeaconState) error
|
||||
VerifySignature(state.ReadOnlyBeaconState) error
|
||||
VerifiedPayloadAttestation() (payloadattestation.VerifiedROMessage, error)
|
||||
SatisfyRequirement(Requirement)
|
||||
}
|
||||
|
||||
// NewPayloadAttestationMsgVerifier is a function signature that can be used by code that needs to be
|
||||
// able to mock Initializer.NewPayloadAttestationMsgVerifier without complex setup.
|
||||
type NewPayloadAttestationMsgVerifier func(pa payloadattestation.ROMessage, reqs []Requirement) PayloadAttestationMsgVerifier
|
||||
|
||||
@@ -1,177 +0,0 @@
|
||||
package verification
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// RequirementList defines a list of requirements.
|
||||
type RequirementList []Requirement
|
||||
|
||||
// PayloadAttGossipRequirements defines the list of requirements for gossip payload attestation messages.
|
||||
var PayloadAttGossipRequirements = []Requirement{
|
||||
RequireCurrentSlot,
|
||||
RequireMessageNotSeen,
|
||||
RequireValidatorInPTC,
|
||||
RequireBlockRootSeen,
|
||||
RequireBlockRootValid,
|
||||
RequireSignatureValid,
|
||||
}
|
||||
|
||||
// GossipPayloadAttestationMessageRequirements is a requirement list for gossip payload attestation messages.
|
||||
var GossipPayloadAttestationMessageRequirements = RequirementList(PayloadAttGossipRequirements)
|
||||
|
||||
var (
|
||||
ErrIncorrectPayloadAttSlot = errors.New("payload att slot does not match the current slot")
|
||||
ErrPayloadAttBlockRootNotSeen = errors.New("block root not seen")
|
||||
ErrPayloadAttBlockRootInvalid = errors.New("block root invalid")
|
||||
ErrIncorrectPayloadAttValidator = errors.New("validator not present in payload timeliness committee")
|
||||
ErrInvalidPayloadAttMessage = errors.New("invalid payload attestation message")
|
||||
)
|
||||
|
||||
var _ PayloadAttestationMsgVerifier = &PayloadAttMsgVerifier{}
|
||||
|
||||
// PayloadAttMsgVerifier is a read-only verifier for payload attestation messages.
|
||||
type PayloadAttMsgVerifier struct {
|
||||
*sharedResources
|
||||
results *results
|
||||
pa payloadattestation.ROMessage
|
||||
}
|
||||
|
||||
// VerifyCurrentSlot verifies if the current slot matches the expected slot.
|
||||
// Represents the following spec verification:
|
||||
// [IGNORE] data.slot is the current slot.
|
||||
func (v *PayloadAttMsgVerifier) VerifyCurrentSlot() (err error) {
|
||||
    defer v.record(RequireCurrentSlot, &err)

    currentSlot := v.clock.CurrentSlot()
    if v.pa.Slot() != currentSlot {
        return fmt.Errorf("%w: got %d want %d", ErrIncorrectPayloadAttSlot, v.pa.Slot(), currentSlot)
    }

    return nil
}

// VerifyBlockRootSeen verifies if the block root has been seen before.
// Represents the following spec verification:
// [IGNORE] The attestation's data.beacon_block_root has been seen (via both gossip and non-gossip sources).
func (v *PayloadAttMsgVerifier) VerifyBlockRootSeen(blockRootSeen func([32]byte) bool) (err error) {
    defer v.record(RequireBlockRootSeen, &err)
    if blockRootSeen != nil && blockRootSeen(v.pa.BeaconBlockRoot()) {
        return nil
    }
    return fmt.Errorf("%w: root=%#x", ErrPayloadAttBlockRootNotSeen, v.pa.BeaconBlockRoot())
}

// VerifyBlockRootValid verifies if the block root is valid.
// Represents the following spec verification:
// [REJECT] The beacon block with root data.beacon_block_root passes validation.
func (v *PayloadAttMsgVerifier) VerifyBlockRootValid(badBlock func([32]byte) bool) (err error) {
    defer v.record(RequireBlockRootValid, &err)

    if badBlock != nil && badBlock(v.pa.BeaconBlockRoot()) {
        return fmt.Errorf("%w: root=%#x", ErrPayloadAttBlockRootInvalid, v.pa.BeaconBlockRoot())
    }

    return nil
}

// VerifyValidatorInPTC verifies if the validator is present.
// Represents the following spec verification:
// [REJECT] The validator index is within the payload committee in get_ptc(state, data.slot), for the current slot's head state.
func (v *PayloadAttMsgVerifier) VerifyValidatorInPTC(ctx context.Context, st state.ReadOnlyBeaconState) (err error) {
    defer v.record(RequireValidatorInPTC, &err)

    ptc, err := gloas.PayloadCommittee(ctx, st, v.pa.Slot())
    if err != nil {
        return err
    }

    if slices.Index(ptc, v.pa.ValidatorIndex()) == -1 {
        return fmt.Errorf("%w: validatorIndex=%d", ErrIncorrectPayloadAttValidator, v.pa.ValidatorIndex())
    }

    return nil
}

// VerifySignature verifies the signature of the payload attestation message.
// Represents the following spec verification:
// [REJECT] The signature of payload_attestation_message.signature is valid with respect to the validator index.
func (v *PayloadAttMsgVerifier) VerifySignature(st state.ReadOnlyBeaconState) (err error) {
    defer v.record(RequireSignatureValid, &err)

    err = validatePayloadAttestationMessageSignature(st, v.pa)
    if err != nil {
        return err
    }

    return nil
}

// VerifiedPayloadAttestation returns a verified payload attestation message by checking all requirements.
func (v *PayloadAttMsgVerifier) VerifiedPayloadAttestation() (payloadattestation.VerifiedROMessage, error) {
    if v.results.allSatisfied() {
        return payloadattestation.NewVerifiedROMessage(v.pa), nil
    }
    return payloadattestation.VerifiedROMessage{}, ErrInvalidPayloadAttMessage
}

// SatisfyRequirement allows the caller to manually mark a requirement as satisfied.
func (v *PayloadAttMsgVerifier) SatisfyRequirement(req Requirement) {
    v.record(req, nil)
}

// validatePayloadAttestationMessageSignature verifies the signature of a payload attestation message.
func validatePayloadAttestationMessageSignature(st state.ReadOnlyBeaconState, payloadAtt payloadattestation.ROMessage) error {
    val, err := st.ValidatorAtIndex(payloadAtt.ValidatorIndex())
    if err != nil {
        return fmt.Errorf("validator %d: %w", payloadAtt.ValidatorIndex(), err)
    }

    pub, err := bls.PublicKeyFromBytes(val.PublicKey)
    if err != nil {
        return fmt.Errorf("public key: %w", err)
    }

    s := payloadAtt.Signature()
    sig, err := bls.SignatureFromBytes(s[:])
    if err != nil {
        return fmt.Errorf("signature bytes: %w", err)
    }

    currentEpoch := slots.ToEpoch(st.Slot())
    domain, err := signing.Domain(st.Fork(), currentEpoch, params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
    if err != nil {
        return fmt.Errorf("domain: %w", err)
    }

    root, err := payloadAtt.SigningRoot(domain)
    if err != nil {
        return fmt.Errorf("signing root: %w", err)
    }

    if !sig.Verify(pub, root[:]) {
        return fmt.Errorf("verify signature: %w", signing.ErrSigFailedToVerify)
    }
    return nil
}

// record records the result of a requirement verification.
func (v *PayloadAttMsgVerifier) record(req Requirement, err *error) {
    if err == nil || *err == nil {
        v.results.record(req, nil)
        return
    }

    v.results.record(req, *err)
}
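A minimal sketch of how a gossip handler might drive this verifier end to end. It only uses names that appear in this change (Initializer, GossipPayloadAttestationMessageRequirements, the Verify* methods); the helper callbacks, function name, and omitted imports are assumptions, not the actual sync-package wiring.

```go
// Sketch only: driving the payload attestation verifier from a gossip handler.
// seenBlockRoot, isBadBlock and headState stand in for whatever caches and
// state access the caller already has.
func validatePayloadAttGossip(
    ctx context.Context,
    ini *verification.Initializer,
    msg *ethpb.PayloadAttestationMessage,
    seenBlockRoot, isBadBlock func([32]byte) bool,
    headState state.ReadOnlyBeaconState,
) (payloadattestation.VerifiedROMessage, error) {
    ro, err := payloadattestation.NewReadOnly(msg)
    if err != nil {
        return payloadattestation.VerifiedROMessage{}, err
    }
    v := ini.NewPayloadAttestationMsgVerifier(ro, verification.GossipPayloadAttestationMessageRequirements)
    if err := v.VerifyCurrentSlot(); err != nil {
        return payloadattestation.VerifiedROMessage{}, err
    }
    if err := v.VerifyBlockRootSeen(seenBlockRoot); err != nil {
        return payloadattestation.VerifiedROMessage{}, err
    }
    if err := v.VerifyBlockRootValid(isBadBlock); err != nil {
        return payloadattestation.VerifiedROMessage{}, err
    }
    if err := v.VerifyValidatorInPTC(ctx, headState); err != nil {
        return payloadattestation.VerifiedROMessage{}, err
    }
    if err := v.VerifySignature(headState); err != nil {
        return payloadattestation.VerifiedROMessage{}, err
    }
    // Deduplication is assumed to be handled by the caller's seen-message cache.
    v.SatisfyRequirement(verification.RequireMessageNotSeen)
    return v.VerifiedPayloadAttestation()
}
```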
@@ -1,46 +0,0 @@
package verification

import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
)

type MockPayloadAttestation struct {
    ErrIncorrectPayloadAttSlot      error
    ErrIncorrectPayloadAttValidator error
    ErrPayloadAttBlockRootNotSeen   error
    ErrPayloadAttBlockRootInvalid   error
    ErrInvalidPayloadAttMessage     error
    ErrInvalidMessageSignature      error
    ErrUnsatisfiedRequirement       error
}

var _ PayloadAttestationMsgVerifier = &MockPayloadAttestation{}

func (m *MockPayloadAttestation) VerifyCurrentSlot() error {
    return m.ErrIncorrectPayloadAttSlot
}

func (m *MockPayloadAttestation) VerifyValidatorInPTC(ctx context.Context, st state.ReadOnlyBeaconState) error {
    return m.ErrIncorrectPayloadAttValidator
}

func (m *MockPayloadAttestation) VerifyBlockRootSeen(_ func([32]byte) bool) error {
    return m.ErrPayloadAttBlockRootNotSeen
}

func (m *MockPayloadAttestation) VerifyBlockRootValid(func([32]byte) bool) error {
    return m.ErrPayloadAttBlockRootInvalid
}

func (m *MockPayloadAttestation) VerifySignature(st state.ReadOnlyBeaconState) (err error) {
    return m.ErrInvalidMessageSignature
}

func (m *MockPayloadAttestation) VerifiedPayloadAttestation() (payloadattestation.VerifiedROMessage, error) {
    return payloadattestation.VerifiedROMessage{}, nil
}

func (m *MockPayloadAttestation) SatisfyRequirement(req Requirement) {}
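For tests that only need a stubbed verifier, the mock above returns whatever error each field is set to. A short usage sketch (field choice and assertions are illustrative only):

```go
// Sketch: stubbing verification failures in a consumer test.
mock := &verification.MockPayloadAttestation{
    ErrIncorrectPayloadAttSlot: verification.ErrIncorrectPayloadAttSlot,
}
require.ErrorIs(t, mock.VerifyCurrentSlot(), verification.ErrIncorrectPayloadAttSlot)
require.NoError(t, mock.VerifyBlockRootSeen(nil)) // unset fields return nil
```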
@@ -1,167 +0,0 @@
|
||||
package verification
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
payloadattestation "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
testutil "github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
func TestPayloadAttestationVerifyCurrentSlot(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
now := time.Unix(1000, 0)
|
||||
genesis := now.Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
clock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now }))
|
||||
ini := &Initializer{shared: &sharedResources{clock: clock}}
|
||||
|
||||
msg := newPayloadAttestationMessage(primitives.Slot(1), 0, bytes.Repeat([]byte{0x11}, 32))
|
||||
pa, err := payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v := ini.NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
require.NoError(t, v.VerifyCurrentSlot())
|
||||
|
||||
msg = newPayloadAttestationMessage(primitives.Slot(2), 0, bytes.Repeat([]byte{0x11}, 32))
|
||||
pa, err = payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v = ini.NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
require.ErrorIs(t, v.VerifyCurrentSlot(), ErrIncorrectPayloadAttSlot)
|
||||
}
|
||||
|
||||
func TestPayloadAttestationVerifyBlockRootSeenAndValid(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ini := &Initializer{shared: &sharedResources{}}
|
||||
root := bytes.Repeat([]byte{0x22}, 32)
|
||||
var root32 [32]byte
|
||||
copy(root32[:], root)
|
||||
|
||||
msg := newPayloadAttestationMessage(primitives.Slot(1), 0, root)
|
||||
pa, err := payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v := ini.NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
|
||||
require.NoError(t, v.VerifyBlockRootSeen(func(r [32]byte) bool { return r == root32 }))
|
||||
require.ErrorIs(t, v.VerifyBlockRootSeen(func([32]byte) bool { return false }), ErrPayloadAttBlockRootNotSeen)
|
||||
|
||||
require.NoError(t, v.VerifyBlockRootValid(func([32]byte) bool { return false }))
|
||||
require.ErrorIs(t, v.VerifyBlockRootValid(func([32]byte) bool { return true }), ErrPayloadAttBlockRootInvalid)
|
||||
}
|
||||
|
||||
func TestPayloadAttestationVerifyValidatorInPTC(t *testing.T) {
|
||||
setupPayloadAttTestConfig(t)
|
||||
|
||||
_, pk := newKey(t)
|
||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
||||
msg := newPayloadAttestationMessage(primitives.Slot(1), 0, bytes.Repeat([]byte{0x33}, 32))
|
||||
pa, err := payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v := (&Initializer{shared: &sharedResources{}}).NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
require.NoError(t, v.VerifyValidatorInPTC(context.Background(), st))
|
||||
|
||||
msg = newPayloadAttestationMessage(primitives.Slot(1), 1, bytes.Repeat([]byte{0x33}, 32))
|
||||
pa, err = payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v = (&Initializer{shared: &sharedResources{}}).NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
require.ErrorIs(t, v.VerifyValidatorInPTC(context.Background(), st), ErrIncorrectPayloadAttValidator)
|
||||
}
|
||||
|
||||
func TestPayloadAttestationVerifySignature(t *testing.T) {
|
||||
setupPayloadAttTestConfig(t)
|
||||
|
||||
sk, pk := newKey(t)
|
||||
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
|
||||
root := bytes.Repeat([]byte{0x44}, 32)
|
||||
data := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: root,
|
||||
Slot: 1,
|
||||
PayloadPresent: true,
|
||||
BlobDataAvailable: true,
|
||||
}
|
||||
msg := ð.PayloadAttestationMessage{
|
||||
ValidatorIndex: 0,
|
||||
Data: data,
|
||||
Signature: signPayloadAttestationMessage(t, st, data, sk),
|
||||
}
|
||||
pa, err := payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v := (&Initializer{shared: &sharedResources{}}).NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
require.NoError(t, v.VerifySignature(st))
|
||||
|
||||
sk2, _ := newKey(t)
|
||||
msg.Signature = signPayloadAttestationMessage(t, st, data, sk2)
|
||||
pa, err = payloadattestation.NewReadOnly(msg)
|
||||
require.NoError(t, err)
|
||||
v = (&Initializer{shared: &sharedResources{}}).NewPayloadAttestationMsgVerifier(pa, GossipPayloadAttestationMessageRequirements)
|
||||
require.ErrorIs(t, v.VerifySignature(st), signing.ErrSigFailedToVerify)
|
||||
}
|
||||
|
||||
func newPayloadAttestationMessage(slot primitives.Slot, idx primitives.ValidatorIndex, root []byte) *eth.PayloadAttestationMessage {
|
||||
return ð.PayloadAttestationMessage{
|
||||
ValidatorIndex: idx,
|
||||
Data: ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: root,
|
||||
Slot: slot,
|
||||
PayloadPresent: true,
|
||||
BlobDataAvailable: true,
|
||||
},
|
||||
Signature: []byte{0x01},
|
||||
}
|
||||
}
|
||||
|
||||
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
|
||||
st, err := testutil.NewBeaconStateGloas()
|
||||
require.NoError(t, err)
|
||||
for _, v := range vals {
|
||||
require.NoError(t, st.AppendValidator(v))
|
||||
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
|
||||
}
|
||||
require.NoError(t, st.SetSlot(slot))
|
||||
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
|
||||
return st
|
||||
}
|
||||
|
||||
func setupPayloadAttTestConfig(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SlotsPerEpoch = 1
|
||||
cfg.MaxEffectiveBalanceElectra = cfg.MaxEffectiveBalance
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
|
||||
func activeValidator(pub []byte) *eth.Validator {
|
||||
return ð.Validator{
|
||||
PublicKey: pub,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
func newKey(t *testing.T) (common.SecretKey, []byte) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
return sk, sk.PublicKey().Marshal()
|
||||
}
|
||||
|
||||
func signPayloadAttestationMessage(t *testing.T, st state.ReadOnlyBeaconState, data *eth.PayloadAttestationData, sk common.SecretKey) []byte {
|
||||
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(st.Slot()), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(data, domain)
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(root[:])
|
||||
return sig.Marshal()
|
||||
}
|
||||
@@ -1,27 +0,0 @@
package verification

const (
    RequireBlobIndexInBounds Requirement = iota
    RequireNotFromFutureSlot
    RequireSlotAboveFinalized
    RequireValidProposerSignature
    RequireSidecarParentSeen
    RequireSidecarParentValid
    RequireSidecarParentSlotLower
    RequireSidecarDescendsFromFinalized
    RequireSidecarInclusionProven
    RequireSidecarKzgProofVerified
    RequireSidecarProposerExpected

    // Data columns specific.
    RequireValidFields
    RequireCorrectSubnet

    // Payload attestation specific.
    RequireCurrentSlot
    RequireMessageNotSeen
    RequireValidatorInPTC
    RequireBlockRootSeen
    RequireBlockRootValid
    RequireSignatureValid
)
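For reference, the gossip requirement set referenced by the tests in this change (as GossipPayloadAttestationMessageRequirements / PayloadAttGossipRequirements) plausibly combines the payload-attestation constants above as follows; the exact composition in the tree may differ.

```go
// Sketch: a plausible gossip requirement list built from the constants above.
var PayloadAttGossipRequirements = []Requirement{
    RequireCurrentSlot,
    RequireMessageNotSeen,
    RequireValidatorInPTC,
    RequireBlockRootSeen,
    RequireBlockRootValid,
    RequireSignatureValid,
}
```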
@@ -29,22 +29,6 @@ func (r Requirement) String() string {
        return "RequireSidecarKzgProofVerified"
    case RequireSidecarProposerExpected:
        return "RequireSidecarProposerExpected"
    case RequireValidFields:
        return "RequireValidFields"
    case RequireCorrectSubnet:
        return "RequireCorrectSubnet"
    case RequireCurrentSlot:
        return "RequireCurrentSlot"
    case RequireMessageNotSeen:
        return "RequireMessageNotSeen"
    case RequireValidatorInPTC:
        return "RequireValidatorInPTC"
    case RequireBlockRootSeen:
        return "RequireBlockRootSeen"
    case RequireBlockRootValid:
        return "RequireBlockRootValid"
    case RequireSignatureValid:
        return "RequireSignatureValid"
    default:
        return unknownRequirementName
    }

@@ -61,16 +61,3 @@ func TestAllBlobRequirementsHaveStrings(t *testing.T) {
        require.NotEqual(t, unknownRequirementName, allBlobSidecarRequirements[i].String())
    }
}

func TestPayloadAttestationRequirementsHaveStrings(t *testing.T) {
    blobReqs := make(map[Requirement]struct{}, len(allBlobSidecarRequirements))
    for i := range allBlobSidecarRequirements {
        blobReqs[allBlobSidecarRequirements[i]] = struct{}{}
    }
    for i := range PayloadAttGossipRequirements {
        req := PayloadAttGossipRequirements[i]
        require.NotEqual(t, unknownRequirementName, req.String())
        _, overlaps := blobReqs[req]
        require.Equal(t, false, overlaps)
    }
}

@@ -1,3 +0,0 @@
### Ignored

- Improving maintainability and deduplication on get and post block parsing.
changelog/potuz_forkchoice_unused_highestblockdelay.md (new file)
@@ -0,0 +1,2 @@
### Ignored
- Removed unused `HighestBlockDelay` method in forkchoice.
@@ -1,2 +0,0 @@
### Added
- Adapted forkchoice to the future Gloas-compatible type.
@@ -1,2 +0,0 @@
### Added
- Added an InsertPayload method to allow full nodes to insert payloads after Gloas.
@@ -0,0 +1,2 @@
### Added
- Added execution payload processing for Gloas.
@@ -1,3 +0,0 @@
### Added

- Added support for payload attestation gossip in Gloas.
@@ -98,6 +98,7 @@ func compareConfigs(t *testing.T, expected, actual *BeaconChainConfig) {
    require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
    require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
    require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
    require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
    require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
    require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
    require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)

@@ -119,6 +119,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
    // Initial values.
    assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
    assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
    assert.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte, "%s: BuilderWithdrawalPrefixByte", name)
    assert.DeepEqual(t, expected.ETH1AddressWithdrawalPrefixByte, actual.ETH1AddressWithdrawalPrefixByte, "%s: ETH1AddressWithdrawalPrefixByte", name)

    // Time parameters.

@@ -31,6 +31,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
    // Initial values
    minimalConfig.BLSWithdrawalPrefixByte = byte(0)
    minimalConfig.ETH1AddressWithdrawalPrefixByte = byte(1)
    minimalConfig.BuilderWithdrawalPrefixByte = byte(3)

    // Time parameters
    minimalConfig.SecondsPerSlot = 6

@@ -54,6 +54,7 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
    require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
    require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
    require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
    require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
    require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
    require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
    require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)

@@ -117,7 +117,7 @@ func (h executionPayloadBidGloas) GasLimit() uint64 {
    return h.payload.GasLimit
}

// BuilderIndex returns the validator index of the builder who created this bid.
// BuilderIndex returns the builder index of the builder who created this bid.
func (h executionPayloadBidGloas) BuilderIndex() primitives.BuilderIndex {
    return h.payload.BuilderIndex
}

@@ -1,15 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["readonly_message.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attestation",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/signing:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
@@ -1,87 +0,0 @@
package payloadattestation

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

var (
    errNilPayloadAttMessage   = errors.New("received nil payload attestation message")
    errNilPayloadAttData      = errors.New("received nil payload attestation data")
    errNilPayloadAttSignature = errors.New("received nil payload attestation signature")
)

// ROMessage represents a read-only payload attestation message.
type ROMessage struct {
    m *ethpb.PayloadAttestationMessage
}

// validatePayloadAtt checks if the given payload attestation message is valid.
func validatePayloadAtt(m *ethpb.PayloadAttestationMessage) error {
    if m == nil {
        return errNilPayloadAttMessage
    }
    if m.Data == nil {
        return errNilPayloadAttData
    }
    if len(m.Signature) == 0 {
        return errNilPayloadAttSignature
    }
    return nil
}

// NewReadOnly creates a new ReadOnly instance after validating the message.
func NewReadOnly(m *ethpb.PayloadAttestationMessage) (ROMessage, error) {
    if err := validatePayloadAtt(m); err != nil {
        return ROMessage{}, err
    }
    return ROMessage{m}, nil
}

// ValidatorIndex returns the validator index from the payload attestation message.
func (r *ROMessage) ValidatorIndex() primitives.ValidatorIndex {
    return r.m.ValidatorIndex
}

// Signature returns the signature from the payload attestation message.
func (r *ROMessage) Signature() [96]byte {
    return bytesutil.ToBytes96(r.m.Signature)
}

// BeaconBlockRoot returns the beacon block root from the payload attestation message.
func (r *ROMessage) BeaconBlockRoot() [32]byte {
    return bytesutil.ToBytes32(r.m.Data.BeaconBlockRoot)
}

// Slot returns the slot from the payload attestation message.
func (r *ROMessage) Slot() primitives.Slot {
    return r.m.Data.Slot
}

// PayloadPresent returns whether the payload was present.
func (r *ROMessage) PayloadPresent() bool {
    return r.m.Data.PayloadPresent
}

// BlobDataAvailable returns whether blob data was available.
func (r *ROMessage) BlobDataAvailable() bool {
    return r.m.Data.BlobDataAvailable
}

// SigningRoot returns the signing root from the payload attestation message.
func (r *ROMessage) SigningRoot(domain []byte) ([32]byte, error) {
    return signing.ComputeSigningRoot(r.m.Data, domain)
}

// VerifiedROMessage represents a verified read-only payload attestation message.
type VerifiedROMessage struct {
    ROMessage
}

// NewVerifiedROMessage creates a new VerifiedROMessage instance after validating the message.
func NewVerifiedROMessage(r ROMessage) VerifiedROMessage {
    return VerifiedROMessage{r}
}
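A minimal sketch of constructing and reading the read-only wrapper defined above; the literal field values are placeholders.

```go
// Sketch: wrapping a payload attestation message and reading it back.
msg := &ethpb.PayloadAttestationMessage{
    ValidatorIndex: 7,
    Data: &ethpb.PayloadAttestationData{
        BeaconBlockRoot: make([]byte, 32),
        Slot:            42,
        PayloadPresent:  true,
    },
    Signature: make([]byte, 96),
}
ro, err := payloadattestation.NewReadOnly(msg) // rejects nil message/data or empty signature
if err != nil {
    return err
}
_ = ro.Slot()            // 42
_ = ro.BeaconBlockRoot() // fixed-size [32]byte copy of the root
_ = ro.PayloadPresent()  // true
```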
@@ -5,6 +5,7 @@ import (
    "encoding/binary"

    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -141,3 +142,24 @@ func withdrawalRoot(w *enginev1.Withdrawal) ([32]byte, error) {
    }
    return w.HashTreeRoot()
}

// KzgCommitmentsRoot computes the HTR for a list of KZG commitments
func KzgCommitmentsRoot(commitments [][]byte) ([32]byte, error) {
    roots := make([][32]byte, len(commitments))
    for i, commitment := range commitments {
        chunks, err := PackByChunk([][]byte{commitment})
        if err != nil {
            return [32]byte{}, err
        }
        roots[i] = htr.VectorizedSha256(chunks)[0]
    }

    commitmentsRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), fieldparams.MaxBlobCommitmentsPerBlock)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
    }

    length := make([]byte, 32)
    binary.LittleEndian.PutUint64(length[:8], uint64(len(roots)))
    return MixInLength(commitmentsRoot, length), nil
}

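A small usage sketch for the new helper, assuming it lives in the encoding/ssz package alongside PackByChunk and BitwiseMerkleize; the commitment bytes below are placeholders, not real KZG commitments.

```go
// Sketch: hashing a short list of 48-byte KZG commitments.
commitments := [][]byte{
    bytes.Repeat([]byte{0xaa}, 48),
    bytes.Repeat([]byte{0xbb}, 48),
}
root, err := ssz.KzgCommitmentsRoot(commitments)
if err != nil {
    return err
}
fmt.Printf("commitments root: %#x\n", root)
```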
@@ -202,6 +202,7 @@ go_test(
    "fulu__ssz_static__ssz_static_test.go",
    "gloas__epoch_processing__process_builder_pending_payments_test.go",
    "gloas__operations__execution_payload_header_test.go",
    "gloas__operations__execution_payload_test.go",
    "gloas__operations__payload_attestation_test.go",
    "gloas__operations__proposer_slashing_test.go",
    "gloas__sanity__slots_test.go",

@@ -0,0 +1,11 @@
package mainnet

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMainnet_Gloas_Operations_ExecutionPayloadEnvelope(t *testing.T) {
    operations.RunExecutionPayloadTest(t, "mainnet")
}
@@ -208,6 +208,7 @@ go_test(
    "fulu__ssz_static__ssz_static_test.go",
    "gloas__epoch_processing__process_builder_pending_payments_test.go",
    "gloas__operations__execution_payload_bid_test.go",
    "gloas__operations__execution_payload_test.go",
    "gloas__operations__payload_attestation_test.go",
    "gloas__operations__proposer_slashing_test.go",
    "gloas__sanity__slots_test.go",

@@ -0,0 +1,11 @@
package minimal

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMinimal_Gloas_Operations_ExecutionPayloadEnvelope(t *testing.T) {
    operations.RunExecutionPayloadTest(t, "minimal")
}
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    testonly = True,
    srcs = [
        "execution_payload.go",
        "execution_payload_bid.go",
        "helpers.go",
        "payload_attestation.go",
@@ -12,12 +13,23 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/gloas:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/require:go_default_library",
        "//testing/spectest/shared/common/operations:go_default_library",
        "//testing/spectest/utils:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_google_go_cmp//cmp:go_default_library",
        "@io_bazel_rules_go//go/tools/bazel:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//testing/protocmp:go_default_library",
    ],
)

testing/spectest/shared/gloas/operations/execution_payload.go (new file)
@@ -0,0 +1,123 @@
package operations

import (
    "context"
    "os"
    "path"
    "strings"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
    "github.com/OffchainLabs/prysm/v7/testing/util"
    "github.com/bazelbuild/rules_go/go/tools/bazel"
    "github.com/golang/snappy"
    "github.com/google/go-cmp/cmp"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/testing/protocmp"
)

type ExecutionConfig struct {
    Valid bool `json:"execution_valid"`
}

func sszToSignedExecutionPayloadEnvelope(b []byte) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
    envelope := &ethpb.SignedExecutionPayloadEnvelope{}
    if err := envelope.UnmarshalSSZ(b); err != nil {
        return nil, err
    }
    return blocks.WrappedROSignedExecutionPayloadEnvelope(envelope)
}

func RunExecutionPayloadTest(t *testing.T, config string) {
    require.NoError(t, utils.SetConfig(t, config))
    cfg := params.BeaconConfig()
    params.SetGenesisFork(t, cfg, version.Fulu)
    testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "operations/execution_payload/pyspec_tests")
    if len(testFolders) == 0 {
        t.Fatalf("No test folders found for %s/%s/%s", config, "gloas", "operations/execution_payload/pyspec_tests")
    }
    for _, folder := range testFolders {
        t.Run(folder.Name(), func(t *testing.T) {
            helpers.ClearCache()

            // Check if signed_envelope.ssz_snappy exists, skip if not
            _, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "signed_envelope.ssz_snappy"))
            if err != nil && strings.Contains(err.Error(), "could not locate file") {
                t.Skipf("Skipping test %s: signed_envelope.ssz_snappy not found", folder.Name())
                return
            }

            // Read the signed execution payload envelope
            envelopeFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "signed_envelope.ssz_snappy")
            require.NoError(t, err)
            envelopeSSZ, err := snappy.Decode(nil /* dst */, envelopeFile)
            require.NoError(t, err, "Failed to decompress envelope")
            signedEnvelope, err := sszToSignedExecutionPayloadEnvelope(envelopeSSZ)
            require.NoError(t, err, "Failed to unmarshal signed envelope")

            preBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz_snappy")
            require.NoError(t, err)
            preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
            require.NoError(t, err, "Failed to decompress")
            preBeaconState, err := sszToState(preBeaconStateSSZ)
            require.NoError(t, err)

            postSSZFilepath, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "post.ssz_snappy"))
            postSSZExists := true
            if err != nil && strings.Contains(err.Error(), "could not locate file") {
                postSSZExists = false
            } else {
                require.NoError(t, err)
            }

            file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "execution.yaml")
            require.NoError(t, err)
            config := &ExecutionConfig{}
            require.NoError(t, utils.UnmarshalYaml(file, config), "Failed to Unmarshal")
            if !config.Valid {
                t.Skip("Skipping invalid execution engine test as it's never supported")
            }

            err = gloas.ProcessExecutionPayload(context.Background(), preBeaconState, signedEnvelope)
            if postSSZExists {
                require.NoError(t, err)
                comparePostState(t, postSSZFilepath, preBeaconState)
            } else if config.Valid {
                // Note: This doesn't test anything worthwhile. It essentially tests
                // that *any* error has occurred, not any specific error.
                if err == nil {
                    t.Fatal("Did not fail when expected")
                }
                t.Logf("Expected failure; failure reason = %v", err)
                return
            }
        })
    }
}

func comparePostState(t *testing.T, postSSZFilepath string, want state.BeaconState) {
    postBeaconStateFile, err := os.ReadFile(postSSZFilepath) // #nosec G304
    require.NoError(t, err)
    postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
    require.NoError(t, err, "Failed to decompress")
    postBeaconState, err := sszToState(postBeaconStateSSZ)
    require.NoError(t, err)
    postBeaconStatePb, ok := postBeaconState.ToProtoUnsafe().(proto.Message)
    require.Equal(t, true, ok, "post beacon state did not return a proto.Message")
    pbState, ok := want.ToProtoUnsafe().(proto.Message)
    require.Equal(t, true, ok, "beacon state did not return a proto.Message")

    if !proto.Equal(postBeaconStatePb, pbState) {
        diff := cmp.Diff(pbState, postBeaconStatePb, protocmp.Transform())
        t.Fatalf("Post state does not match expected state, diff: %s", diff)
    }
}
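Outside the spec test harness, the same entry point would be driven roughly as below; the wrapper function name and error handling are illustrative only, and the state and envelope are assumed to come from the caller.

```go
// Sketch: applying a signed execution payload envelope to a Gloas beacon state.
func applyEnvelope(ctx context.Context, st state.BeaconState, signed *ethpb.SignedExecutionPayloadEnvelope) error {
    env, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
    if err != nil {
        return err
    }
    return gloas.ProcessExecutionPayload(ctx, st, env)
}
```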
@@ -52,7 +52,6 @@ go_library(
    "//consensus-types/primitives:go_default_library",
    "//consensus-types/validator:go_default_library",
    "//encoding/bytesutil:go_default_library",
    "//encoding/ssz:go_default_library",
    "//monitoring/tracing/trace:go_default_library",
    "//network/httputil:go_default_library",
    "//proto/engine/v1:go_default_library",

@@ -66,13 +66,12 @@ func (c *beaconApiValidatorClient) duties(ctx context.Context, in *ethpb.DutiesR
    }()

    nextEpochDuties := &ethpb.ValidatorDutiesContainer{}
    nextEpochErr := c.dutiesForEpoch(ctx, nextEpochDuties, in.Epoch+1, vals, fetchSyncDuties)

    if currEpochErr := <-errCh; currEpochErr != nil {
        return nil, currEpochErr
    if err := c.dutiesForEpoch(ctx, nextEpochDuties, in.Epoch+1, vals, fetchSyncDuties); err != nil {
        return nil, errors.Wrapf(err, "failed to get duties for next epoch `%d`", in.Epoch+1)
    }
    if nextEpochErr != nil {
        return nil, errors.Wrapf(nextEpochErr, "failed to get duties for next epoch `%d`", in.Epoch+1)

    if err = <-errCh; err != nil {
        return nil, err
    }

    return &ethpb.ValidatorDutiesContainer{

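Because the added and removed lines interleave without +/- markers above, the hunk is hard to read; the resulting flow appears to be roughly the following sketch, using only identifiers shown in the hunk (errCh is filled by a goroutine set up earlier in the function).

```go
// Sketch of the duties flow after this change: next-epoch duties are fetched
// inline, then the current-epoch error from the background fetch is checked.
nextEpochDuties := &ethpb.ValidatorDutiesContainer{}
if err := c.dutiesForEpoch(ctx, nextEpochDuties, in.Epoch+1, vals, fetchSyncDuties); err != nil {
    return nil, errors.Wrapf(err, "failed to get duties for next epoch `%d`", in.Epoch+1)
}
if err := <-errCh; err != nil {
    return nil, err
}
```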
@@ -55,153 +55,114 @@ func (c *beaconApiValidatorClient) beaconBlock(ctx context.Context, slot primiti
|
||||
}
|
||||
}
|
||||
|
||||
// sszBlockCodec defines SSZ unmarshalers for a fork's block and blinded block types.
|
||||
type sszBlockCodec struct {
|
||||
unmarshalBlock func([]byte) (*ethpb.GenericBeaconBlock, error)
|
||||
unmarshalBlinded func([]byte) (*ethpb.GenericBeaconBlock, error) // nil for Phase0/Altair
|
||||
}
|
||||
|
||||
type sszCodecEntry struct {
|
||||
minVersion int
|
||||
codec sszBlockCodec
|
||||
}
|
||||
|
||||
// sszCodecs is ordered descending by version so that unknown future versions
|
||||
// fall through to the latest known fork (matching the original if-cascade).
|
||||
var sszCodecs = []sszCodecEntry{
|
||||
{
|
||||
minVersion: version.Fulu,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlockContentsFulu{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Fulu{Fulu: block}}, nil
|
||||
},
|
||||
unmarshalBlinded: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockFulu{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blindedBlock}, IsBlinded: true}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
minVersion: version.Electra,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlockContentsElectra{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Electra{Electra: block}}, nil
|
||||
},
|
||||
unmarshalBlinded: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockElectra{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedElectra{BlindedElectra: blindedBlock}, IsBlinded: true}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
minVersion: version.Deneb,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlockContentsDeneb{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Deneb{Deneb: block}}, nil
|
||||
},
|
||||
unmarshalBlinded: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockDeneb{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock}, IsBlinded: true}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
minVersion: version.Capella,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlockCapella{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Capella{Capella: block}}, nil
|
||||
},
|
||||
unmarshalBlinded: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockCapella{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: blindedBlock}, IsBlinded: true}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
minVersion: version.Bellatrix,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlockBellatrix{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Bellatrix{Bellatrix: block}}, nil
|
||||
},
|
||||
unmarshalBlinded: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockBellatrix{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: blindedBlock}, IsBlinded: true}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
minVersion: version.Altair,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlockAltair{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Altair{Altair: block}}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
minVersion: version.Phase0,
|
||||
codec: sszBlockCodec{
|
||||
unmarshalBlock: func(data []byte) (*ethpb.GenericBeaconBlock, error) {
|
||||
block := ðpb.BeaconBlock{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Phase0{Phase0: block}}, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func processBlockSSZResponse(ver int, data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
for _, entry := range sszCodecs {
|
||||
if ver >= entry.minVersion {
|
||||
if isBlinded && entry.codec.unmarshalBlinded != nil {
|
||||
return entry.codec.unmarshalBlinded(data)
|
||||
}
|
||||
return entry.codec.unmarshalBlock(data)
|
||||
if ver >= version.Fulu {
|
||||
return processBlockSSZResponseFulu(data, isBlinded)
|
||||
}
|
||||
if ver >= version.Electra {
|
||||
return processBlockSSZResponseElectra(data, isBlinded)
|
||||
}
|
||||
if ver >= version.Deneb {
|
||||
return processBlockSSZResponseDeneb(data, isBlinded)
|
||||
}
|
||||
if ver >= version.Capella {
|
||||
return processBlockSSZResponseCapella(data, isBlinded)
|
||||
}
|
||||
if ver >= version.Bellatrix {
|
||||
return processBlockSSZResponseBellatrix(data, isBlinded)
|
||||
}
|
||||
if ver >= version.Altair {
|
||||
block := ðpb.BeaconBlockAltair{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Altair{Altair: block}}, nil
|
||||
}
|
||||
if ver >= version.Phase0 {
|
||||
block := ðpb.BeaconBlock{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Phase0{Phase0: block}}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported block version %s", version.String(ver))
|
||||
}
|
||||
|
||||
func processBlockSSZResponseFulu(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockFulu{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blindedBlock}, IsBlinded: true}, nil
|
||||
}
|
||||
block := ðpb.BeaconBlockContentsFulu{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Fulu{Fulu: block}}, nil
|
||||
}
|
||||
|
||||
func processBlockSSZResponseElectra(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockElectra{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedElectra{BlindedElectra: blindedBlock}, IsBlinded: true}, nil
|
||||
}
|
||||
block := ðpb.BeaconBlockContentsElectra{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Electra{Electra: block}}, nil
|
||||
}
|
||||
|
||||
func processBlockSSZResponseDeneb(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockDeneb{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock}, IsBlinded: true}, nil
|
||||
}
|
||||
block := ðpb.BeaconBlockContentsDeneb{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Deneb{Deneb: block}}, nil
|
||||
}
|
||||
|
||||
func processBlockSSZResponseCapella(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockCapella{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: blindedBlock}, IsBlinded: true}, nil
|
||||
}
|
||||
block := ðpb.BeaconBlockCapella{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Capella{Capella: block}}, nil
|
||||
}
|
||||
|
||||
func processBlockSSZResponseBellatrix(data []byte, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
blindedBlock := ðpb.BlindedBeaconBlockBellatrix{}
|
||||
if err := blindedBlock.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: blindedBlock}, IsBlinded: true}, nil
|
||||
}
|
||||
block := ðpb.BeaconBlockBellatrix{}
|
||||
if err := block.UnmarshalSSZ(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Bellatrix{Bellatrix: block}}, nil
|
||||
}
|
||||
|
||||
func convertBlockToGeneric(decoder *json.Decoder, dest ethpb.GenericConverter, version string, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
typeName := version
|
||||
if isBlinded {
|
||||
@@ -219,52 +180,69 @@ func convertBlockToGeneric(decoder *json.Decoder, dest ethpb.GenericConverter, v
|
||||
return genericBlock, nil
|
||||
}
|
||||
|
||||
// jsonBlockTypes defines factory functions for creating block and blinded block structs for JSON decoding.
|
||||
type jsonBlockTypes struct {
|
||||
newBlock func() ethpb.GenericConverter
|
||||
newBlinded func() ethpb.GenericConverter // nil for Phase0/Altair
|
||||
}
|
||||
|
||||
var jsonBlockFactories = map[string]jsonBlockTypes{
|
||||
version.String(version.Phase0): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlock{} },
|
||||
},
|
||||
version.String(version.Altair): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlockAltair{} },
|
||||
},
|
||||
version.String(version.Bellatrix): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlockBellatrix{} },
|
||||
newBlinded: func() ethpb.GenericConverter { return &structs.BlindedBeaconBlockBellatrix{} },
|
||||
},
|
||||
version.String(version.Capella): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlockCapella{} },
|
||||
newBlinded: func() ethpb.GenericConverter { return &structs.BlindedBeaconBlockCapella{} },
|
||||
},
|
||||
version.String(version.Deneb): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlockContentsDeneb{} },
|
||||
newBlinded: func() ethpb.GenericConverter { return &structs.BlindedBeaconBlockDeneb{} },
|
||||
},
|
||||
version.String(version.Electra): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlockContentsElectra{} },
|
||||
newBlinded: func() ethpb.GenericConverter { return &structs.BlindedBeaconBlockElectra{} },
|
||||
},
|
||||
version.String(version.Fulu): {
|
||||
newBlock: func() ethpb.GenericConverter { return &structs.BeaconBlockContentsFulu{} },
|
||||
newBlinded: func() ethpb.GenericConverter { return &structs.BlindedBeaconBlockFulu{} },
|
||||
},
|
||||
}
|
||||
|
||||
func processBlockJSONResponse(ver string, isBlinded bool, decoder *json.Decoder) (*ethpb.GenericBeaconBlock, error) {
|
||||
if decoder == nil {
|
||||
return nil, errors.New("no produce block json decoder found")
|
||||
}
|
||||
|
||||
factory, ok := jsonBlockFactories[ver]
|
||||
if !ok {
|
||||
switch ver {
|
||||
case version.String(version.Phase0):
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlock{}, version.String(version.Phase0), false)
|
||||
|
||||
case version.String(version.Altair):
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlockAltair{}, "altair", false)
|
||||
|
||||
case version.String(version.Bellatrix):
|
||||
return processBellatrixBlock(decoder, isBlinded)
|
||||
|
||||
case version.String(version.Capella):
|
||||
return processCapellaBlock(decoder, isBlinded)
|
||||
|
||||
case version.String(version.Deneb):
|
||||
return processDenebBlock(decoder, isBlinded)
|
||||
|
||||
case version.String(version.Electra):
|
||||
return processElectraBlock(decoder, isBlinded)
|
||||
|
||||
case version.String(version.Fulu):
|
||||
return processFuluBlock(decoder, isBlinded)
|
||||
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported consensus version `%s`", ver)
|
||||
}
|
||||
if isBlinded && factory.newBlinded != nil {
|
||||
return convertBlockToGeneric(decoder, factory.newBlinded(), ver, true)
|
||||
}
|
||||
return convertBlockToGeneric(decoder, factory.newBlock(), ver, false)
|
||||
}
|
||||
|
||||
func processBellatrixBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockBellatrix{}, "bellatrix", true)
|
||||
}
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlockBellatrix{}, "bellatrix", false)
|
||||
}
|
||||
|
||||
func processCapellaBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockCapella{}, "capella", true)
|
||||
}
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlockCapella{}, "capella", false)
|
||||
}
|
||||
|
||||
func processDenebBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockDeneb{}, "deneb", true)
|
||||
}
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlockContentsDeneb{}, "deneb", false)
|
||||
}
|
||||
|
||||
func processElectraBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockElectra{}, "electra", true)
|
||||
}
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlockContentsElectra{}, "electra", false)
|
||||
}
|
||||
|
||||
func processFuluBlock(decoder *json.Decoder, isBlinded bool) (*ethpb.GenericBeaconBlock, error) {
|
||||
if isBlinded {
|
||||
return convertBlockToGeneric(decoder, &structs.BlindedBeaconBlockFulu{}, "fulu", true)
|
||||
}
|
||||
return convertBlockToGeneric(decoder, &structs.BeaconBlockContentsFulu{}, "fulu", false)
|
||||
}
|
||||
|
||||
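The hunks above drop the table-driven SSZ codec dispatch in favor of the per-fork if-cascade. For readers comparing the two, the removed table approach reduced to a loop like this, reconstructed from the removed lines and not necessarily the exact code:

```go
// Sketch: version-descending codec lookup. Entries run from newest to oldest
// fork, so an unknown future version falls through to the latest known codec.
for _, entry := range sszCodecs {
    if ver >= entry.minVersion {
        if isBlinded && entry.codec.unmarshalBlinded != nil {
            return entry.codec.unmarshalBlinded(data)
        }
        return entry.codec.unmarshalBlock(data)
    }
}
return nil, fmt.Errorf("unsupported block version %s", version.String(ver))
```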
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/beacon-api/mock"
|
||||
@@ -26,7 +25,7 @@ func TestGetBeaconBlock_RequestFailed(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -150,7 +149,7 @@ func TestGetBeaconBlock_Error(t *testing.T) {
|
||||
|
||||
b, err := json.Marshal(resp)
|
||||
require.NoError(t, err)
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -186,7 +185,7 @@ func TestGetBeaconBlock_Phase0Valid(t *testing.T) {
|
||||
Data: bytes,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -209,25 +208,6 @@ func TestGetBeaconBlock_Phase0Valid(t *testing.T) {
|
||||
assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
|
||||
}
|
||||
|
||||
func TestSSZCodecs_OrderAndCoverage(t *testing.T) {
|
||||
versions := version.All()
|
||||
require.NotEmpty(t, versions)
|
||||
|
||||
expected := make([]int, 0, len(versions))
|
||||
for i := len(versions) - 1; i >= 0; i-- {
|
||||
expected = append(expected, versions[i])
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(sszCodecs))
|
||||
|
||||
for i, entry := range sszCodecs {
|
||||
assert.Equal(t, expected[i], entry.minVersion, "sszCodecs[%d] has wrong fork order", i)
|
||||
if i > 0 {
|
||||
require.Equal(t, true, entry.minVersion < sszCodecs[i-1].minVersion, "sszCodecs not strictly descending at index %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add SSZ test cases below this line
|
||||
|
||||
func TestGetBeaconBlock_SSZ_BellatrixValid(t *testing.T) {
|
||||
@@ -244,7 +224,7 @@ func TestGetBeaconBlock_SSZ_BellatrixValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -286,7 +266,7 @@ func TestGetBeaconBlock_SSZ_BlindedBellatrixValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -328,7 +308,7 @@ func TestGetBeaconBlock_SSZ_CapellaValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -370,7 +350,7 @@ func TestGetBeaconBlock_SSZ_BlindedCapellaValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -412,7 +392,7 @@ func TestGetBeaconBlock_SSZ_DenebValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -454,7 +434,7 @@ func TestGetBeaconBlock_SSZ_BlindedDenebValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -496,7 +476,7 @@ func TestGetBeaconBlock_SSZ_ElectraValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -538,7 +518,7 @@ func TestGetBeaconBlock_SSZ_BlindedElectraValid(t *testing.T) {
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler := mock.NewMockJsonRestHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
@@ -566,90 +546,6 @@ func TestGetBeaconBlock_SSZ_BlindedElectraValid(t *testing.T) {
|
||||
assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
|
||||
}
|
||||
|
||||
func TestGetBeaconBlock_SSZ_FuluValid(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
proto := testhelpers.GenerateProtoFuluBeaconBlockContents()
|
||||
bytes, err := proto.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
const slot = primitives.Slot(1)
|
||||
randaoReveal := []byte{2}
|
||||
graffiti := []byte{3}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
handler := mock.NewMockHandler(ctrl)
|
||||
handler.EXPECT().GetSSZ(
|
||||
gomock.Any(),
|
||||
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
|
||||
).Return(
|
||||
bytes,
|
||||
http.Header{
|
||||
"Content-Type": []string{api.OctetStreamMediaType},
|
||||
api.VersionHeader: []string{"fulu"},
|
||||
api.ExecutionPayloadBlindedHeader: []string{"false"},
|
||||
},
|
||||
nil,
|
||||
).Times(1)
|
||||
|
||||
validatorClient := &beaconApiValidatorClient{handler: handler}
|
||||
beaconBlock, err := validatorClient.beaconBlock(ctx, slot, randaoReveal, graffiti)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedBeaconBlock := ðpb.GenericBeaconBlock{
|
||||
Block: ðpb.GenericBeaconBlock_Fulu{
|
||||
Fulu: proto,
|
||||
},
|
||||
IsBlinded: false,
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
|
||||
}
|
||||
|
||||
func TestGetBeaconBlock_SSZ_BlindedFuluValid(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

proto := testhelpers.GenerateProtoBlindedFuluBeaconBlock()
bytes, err := proto.MarshalSSZ()
require.NoError(t, err)

const slot = primitives.Slot(1)
randaoReveal := []byte{2}
graffiti := []byte{3}

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
).Return(
bytes,
http.Header{
"Content-Type": []string{api.OctetStreamMediaType},
api.VersionHeader: []string{"fulu"},
api.ExecutionPayloadBlindedHeader: []string{"true"},
},
nil,
).Times(1)

validatorClient := &beaconApiValidatorClient{handler: handler}
beaconBlock, err := validatorClient.beaconBlock(ctx, slot, randaoReveal, graffiti)
require.NoError(t, err)

expectedBeaconBlock := &ethpb.GenericBeaconBlock{
Block: &ethpb.GenericBeaconBlock_BlindedFulu{
BlindedFulu: proto,
},
IsBlinded: true,
}

assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
}
func TestGetBeaconBlock_SSZ_UnsupportedVersion(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -660,7 +556,7 @@ func TestGetBeaconBlock_SSZ_UnsupportedVersion(t *testing.T) {

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -693,7 +589,7 @@ func TestGetBeaconBlock_SSZ_InvalidBlindedHeader(t *testing.T) {

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -726,7 +622,7 @@ func TestGetBeaconBlock_SSZ_InvalidVersionHeader(t *testing.T) {

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -755,7 +651,7 @@ func TestGetBeaconBlock_SSZ_GetSSZError(t *testing.T) {

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -784,7 +680,7 @@ func TestGetBeaconBlock_SSZ_Phase0Valid(t *testing.T) {

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -826,7 +722,7 @@ func TestGetBeaconBlock_SSZ_AltairValid(t *testing.T) {

ctx := t.Context()

handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -874,7 +770,7 @@ func TestGetBeaconBlock_AltairValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -918,7 +814,7 @@ func TestGetBeaconBlock_BellatrixValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -963,7 +859,7 @@ func TestGetBeaconBlock_BlindedBellatrixValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -1008,7 +904,7 @@ func TestGetBeaconBlock_CapellaValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -1053,7 +949,7 @@ func TestGetBeaconBlock_BlindedCapellaValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -1077,96 +973,6 @@ func TestGetBeaconBlock_BlindedCapellaValid(t *testing.T) {
assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
}
func TestGetBeaconBlock_FuluValid(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

proto := testhelpers.GenerateProtoFuluBeaconBlockContents()
block := testhelpers.GenerateJsonFuluBeaconBlockContents()
bytes, err := json.Marshal(block)
require.NoError(t, err)

const slot = primitives.Slot(1)
randaoReveal := []byte{2}
graffiti := []byte{3}

ctx := t.Context()

b, err := json.Marshal(structs.ProduceBlockV3Response{
Version: "fulu",
ExecutionPayloadBlinded: false,
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
).Return(
b,
http.Header{"Content-Type": []string{"application/json"}},
nil,
).Times(1)

validatorClient := &beaconApiValidatorClient{handler: handler}
beaconBlock, err := validatorClient.beaconBlock(ctx, slot, randaoReveal, graffiti)
require.NoError(t, err)

expectedBeaconBlock := &ethpb.GenericBeaconBlock{
Block: &ethpb.GenericBeaconBlock_Fulu{
Fulu: proto,
},
IsBlinded: false,
}

assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
}
func TestGetBeaconBlock_BlindedFuluValid(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

proto := testhelpers.GenerateProtoBlindedFuluBeaconBlock()
block := testhelpers.GenerateJsonBlindedFuluBeaconBlock()
bytes, err := json.Marshal(block)
require.NoError(t, err)

const slot = primitives.Slot(1)
randaoReveal := []byte{2}
graffiti := []byte{3}

ctx := t.Context()

b, err := json.Marshal(structs.ProduceBlockV3Response{
Version: "fulu",
ExecutionPayloadBlinded: true,
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
).Return(
b,
http.Header{"Content-Type": []string{"application/json"}},
nil,
).Times(1)

validatorClient := &beaconApiValidatorClient{handler: handler}
beaconBlock, err := validatorClient.beaconBlock(ctx, slot, randaoReveal, graffiti)
require.NoError(t, err)

expectedBeaconBlock := &ethpb.GenericBeaconBlock{
Block: &ethpb.GenericBeaconBlock_BlindedFulu{
BlindedFulu: proto,
},
IsBlinded: true,
}

assert.DeepEqual(t, expectedBeaconBlock, beaconBlock)
}
func TestGetBeaconBlock_DenebValid(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -1188,7 +994,7 @@ func TestGetBeaconBlock_DenebValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -1233,7 +1039,7 @@ func TestGetBeaconBlock_BlindedDenebValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -1278,7 +1084,7 @@ func TestGetBeaconBlock_ElectraValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -1323,7 +1129,7 @@ func TestGetBeaconBlock_BlindedElectraValid(t *testing.T) {
Data: bytes,
})
require.NoError(t, err)
handler := mock.NewMockHandler(ctrl)
handler := mock.NewMockJsonRestHandler(ctrl)
handler.EXPECT().GetSSZ(
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
@@ -7,7 +7,6 @@ import (
"net/http"

"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
"github.com/OffchainLabs/prysm/v7/network/httputil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
@@ -22,128 +21,34 @@ type blockProcessingResult struct {
marshalJSON func() ([]byte, error)
}

type sszMarshaler interface {
MarshalSSZ() ([]byte, error)
}

func buildBlockResult(
versionName string,
blinded bool,
sszObj sszMarshaler,
rootObj ssz.Hashable,
jsonFn func() ([]byte, error),
) (*blockProcessingResult, error) {
beaconBlockRoot, err := rootObj.HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "failed to compute block root for %s beacon block", versionName)
}

marshaledSSZ, err := sszObj.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "failed to serialize %s beacon block", versionName)
}

return &blockProcessingResult{
consensusVersion: versionName,
blinded: blinded,
beaconBlockRoot: beaconBlockRoot,
marshalledSSZ: marshaledSSZ,
marshalJSON: jsonFn,
}, nil
}
func (c *beaconApiValidatorClient) proposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
var res *blockProcessingResult
var err error
switch blockType := in.Block.(type) {
case *ethpb.GenericSignedBeaconBlock_Phase0:
res, err = buildBlockResult("phase0", false, blockType.Phase0, blockType.Phase0.Block, func() ([]byte, error) {
return json.Marshal(structs.SignedBeaconBlockPhase0FromConsensus(blockType.Phase0))
})
res, err = handlePhase0Block(blockType)
case *ethpb.GenericSignedBeaconBlock_Altair:
res, err = buildBlockResult("altair", false, blockType.Altair, blockType.Altair.Block, func() ([]byte, error) {
return json.Marshal(structs.SignedBeaconBlockAltairFromConsensus(blockType.Altair))
})
res, err = handleAltairBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_Bellatrix:
res, err = buildBlockResult("bellatrix", false, blockType.Bellatrix, blockType.Bellatrix.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockBellatrixFromConsensus(blockType.Bellatrix)
if err != nil {
return nil, errors.Wrap(err, "failed to convert bellatrix beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleBellatrixBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_BlindedBellatrix:
res, err = buildBlockResult("bellatrix", true, blockType.BlindedBellatrix, blockType.BlindedBellatrix.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(blockType.BlindedBellatrix)
if err != nil {
return nil, errors.Wrap(err, "failed to convert blinded bellatrix beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleBlindedBellatrixBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_Capella:
res, err = buildBlockResult("capella", false, blockType.Capella, blockType.Capella.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockCapellaFromConsensus(blockType.Capella)
if err != nil {
return nil, errors.Wrap(err, "failed to convert capella beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleCapellaBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_BlindedCapella:
res, err = buildBlockResult("capella", true, blockType.BlindedCapella, blockType.BlindedCapella.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(blockType.BlindedCapella)
if err != nil {
return nil, errors.Wrap(err, "failed to convert blinded capella beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleBlindedCapellaBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_Deneb:
res, err = buildBlockResult("deneb", false, blockType.Deneb, blockType.Deneb.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockContentsDenebFromConsensus(blockType.Deneb)
if err != nil {
return nil, errors.Wrap(err, "failed to convert deneb beacon block contents")
}
return json.Marshal(signedBlock)
})
res, err = handleDenebBlockContents(blockType)
case *ethpb.GenericSignedBeaconBlock_BlindedDeneb:
res, err = buildBlockResult("deneb", true, blockType.BlindedDeneb, blockType.BlindedDeneb, func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(blockType.BlindedDeneb)
if err != nil {
return nil, errors.Wrap(err, "failed to convert deneb blinded beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleBlindedDenebBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_Electra:
res, err = buildBlockResult("electra", false, blockType.Electra, blockType.Electra.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockContentsElectraFromConsensus(blockType.Electra)
if err != nil {
return nil, errors.Wrap(err, "failed to convert electra beacon block contents")
}
return json.Marshal(signedBlock)
})
res, err = handleElectraBlockContents(blockType)
case *ethpb.GenericSignedBeaconBlock_BlindedElectra:
res, err = buildBlockResult("electra", true, blockType.BlindedElectra, blockType.BlindedElectra, func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockElectraFromConsensus(blockType.BlindedElectra)
if err != nil {
return nil, errors.Wrap(err, "failed to convert electra blinded beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleBlindedElectraBlock(blockType)
case *ethpb.GenericSignedBeaconBlock_Fulu:
res, err = buildBlockResult("fulu", false, blockType.Fulu, blockType.Fulu.Block, func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockContentsFuluFromConsensus(blockType.Fulu)
if err != nil {
return nil, errors.Wrap(err, "failed to convert fulu beacon block contents")
}
return json.Marshal(signedBlock)
})
res, err = handleFuluBlockContents(blockType)
case *ethpb.GenericSignedBeaconBlock_BlindedFulu:
res, err = buildBlockResult("fulu", true, blockType.BlindedFulu, blockType.BlindedFulu, func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockFuluFromConsensus(blockType.BlindedFulu)
if err != nil {
return nil, errors.Wrap(err, "failed to convert fulu blinded beacon block")
}
return json.Marshal(signedBlock)
})
res, err = handleBlindedFuluBlock(blockType)
default:
return nil, errors.Errorf("unsupported block type %T", in.Block)
}
@@ -211,3 +116,357 @@ func (c *beaconApiValidatorClient) proposeBeaconBlock(ctx context.Context, in *e

return &ethpb.ProposeResponse{BlockRoot: res.beaconBlockRoot[:]}, nil
}
func handlePhase0Block(block *ethpb.GenericSignedBeaconBlock_Phase0) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "phase0"
res.blinded = false

beaconBlockRoot, err := block.Phase0.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for phase0 beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Phase0.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize block for phase0 beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock := structs.SignedBeaconBlockPhase0FromConsensus(block.Phase0)
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleAltairBlock(block *ethpb.GenericSignedBeaconBlock_Altair) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "altair"
res.blinded = false

beaconBlockRoot, err := block.Altair.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for altair beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Altair.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize block for altair beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock := structs.SignedBeaconBlockAltairFromConsensus(block.Altair)
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleBellatrixBlock(block *ethpb.GenericSignedBeaconBlock_Bellatrix) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "bellatrix"
res.blinded = false

beaconBlockRoot, err := block.Bellatrix.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for bellatrix beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Bellatrix.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize block for bellatrix beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockBellatrixFromConsensus(block.Bellatrix)
if err != nil {
return nil, errors.Wrap(err, "failed to convert bellatrix beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleBlindedBellatrixBlock(block *ethpb.GenericSignedBeaconBlock_BlindedBellatrix) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "bellatrix"
res.blinded = true

beaconBlockRoot, err := block.BlindedBellatrix.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for bellatrix beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.BlindedBellatrix.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize block for bellatrix beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockBellatrixFromConsensus(block.BlindedBellatrix)
if err != nil {
return nil, errors.Wrap(err, "failed to convert blinded bellatrix beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleCapellaBlock(block *ethpb.GenericSignedBeaconBlock_Capella) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "capella"
res.blinded = false

beaconBlockRoot, err := block.Capella.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for capella beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Capella.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize capella beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockCapellaFromConsensus(block.Capella)
if err != nil {
return nil, errors.Wrap(err, "failed to convert capella beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleBlindedCapellaBlock(block *ethpb.GenericSignedBeaconBlock_BlindedCapella) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "capella"
res.blinded = true

beaconBlockRoot, err := block.BlindedCapella.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for blinded capella beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.BlindedCapella.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize blinded capella beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockCapellaFromConsensus(block.BlindedCapella)
if err != nil {
return nil, errors.Wrap(err, "failed to convert blinded capella beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleDenebBlockContents(block *ethpb.GenericSignedBeaconBlock_Deneb) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "deneb"
res.blinded = false

beaconBlockRoot, err := block.Deneb.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for deneb beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Deneb.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize deneb beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockContentsDenebFromConsensus(block.Deneb)
if err != nil {
return nil, errors.Wrap(err, "failed to convert deneb beacon block contents")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleBlindedDenebBlock(block *ethpb.GenericSignedBeaconBlock_BlindedDeneb) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "deneb"
res.blinded = true

beaconBlockRoot, err := block.BlindedDeneb.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for deneb blinded beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.BlindedDeneb.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize blinded deneb beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockDenebFromConsensus(block.BlindedDeneb)
if err != nil {
return nil, errors.Wrap(err, "failed to convert deneb blinded beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleElectraBlockContents(block *ethpb.GenericSignedBeaconBlock_Electra) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "electra"
res.blinded = false

beaconBlockRoot, err := block.Electra.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for electra beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Electra.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize electra beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockContentsElectraFromConsensus(block.Electra)
if err != nil {
return nil, errors.Wrap(err, "failed to convert electra beacon block contents")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleBlindedElectraBlock(block *ethpb.GenericSignedBeaconBlock_BlindedElectra) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "electra"
res.blinded = true

beaconBlockRoot, err := block.BlindedElectra.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for electra blinded beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.BlindedElectra.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize blinded electra beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockElectraFromConsensus(block.BlindedElectra)
if err != nil {
return nil, errors.Wrap(err, "failed to convert electra blinded beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleFuluBlockContents(block *ethpb.GenericSignedBeaconBlock_Fulu) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "fulu"
res.blinded = false

beaconBlockRoot, err := block.Fulu.Block.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for fulu beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.Fulu.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize fulu beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBeaconBlockContentsFuluFromConsensus(block.Fulu)
if err != nil {
return nil, errors.Wrap(err, "failed to convert fulu beacon block contents")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
func handleBlindedFuluBlock(block *ethpb.GenericSignedBeaconBlock_BlindedFulu) (*blockProcessingResult, error) {
var res blockProcessingResult
res.consensusVersion = "fulu"
res.blinded = true

beaconBlockRoot, err := block.BlindedFulu.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to compute block root for fulu blinded beacon block")
}
res.beaconBlockRoot = beaconBlockRoot

// Marshal SSZ
ssz, err := block.BlindedFulu.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "failed to serialize blinded fulu beacon block")
}
res.marshalledSSZ = ssz

// Set up JSON marshalling function for fallback
res.marshalJSON = func() ([]byte, error) {
signedBlock, err := structs.SignedBlindedBeaconBlockFuluFromConsensus(block.BlindedFulu)
if err != nil {
return nil, errors.Wrap(err, "failed to convert fulu blinded beacon block")
}
return json.Marshal(signedBlock)
}

return &res, nil
}
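A minimal sketch, not part of this change, of how one of the new per-fork handlers could be exercised directly in a unit test. It assumes generateSignedPhase0Block() (used by the proposer tests below) returns the *ethpb.GenericSignedBeaconBlock_Phase0 oneof wrapper, and it only checks fields that the handler above visibly sets:

func TestHandlePhase0Block_Sketch(t *testing.T) {
	// Hypothetical test; generateSignedPhase0Block is assumed to yield the phase0 wrapper.
	res, err := handlePhase0Block(generateSignedPhase0Block())
	require.NoError(t, err)
	assert.Equal(t, "phase0", res.consensusVersion)
	assert.Equal(t, false, res.blinded)
	// The handler serializes SSZ eagerly and defers JSON marshalling to a fallback closure.
	require.NotNil(t, res.marshalledSSZ)
	jsonBytes, err := res.marshalJSON()
	require.NoError(t, err)
	assert.NotEqual(t, 0, len(jsonBytes))
}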
@@ -620,42 +620,6 @@ func TestProposeBeaconBlock_SSZFails_406_FallbackToJSON(t *testing.T) {
}
}

func TestProposeBeaconBlock_SSZFails_406_JSONFallbackFails(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()

ctx := t.Context()
handler := mock.NewMockHandler(ctrl)

handler.EXPECT().PostSSZ(
gomock.Any(),
"/eth/v2/beacon/blocks",
gomock.Any(),
gomock.Any(),
).Return(
nil, nil, &httputil.DefaultJsonError{
Code: http.StatusNotAcceptable,
Message: "SSZ not supported",
},
).Times(1)

handler.EXPECT().Post(
gomock.Any(),
"/eth/v2/beacon/blocks",
gomock.Any(),
gomock.Any(),
nil,
).Return(
errors.New("json fallback failed"),
).Times(1)

validatorClient := &beaconApiValidatorClient{handler: handler}
_, err := validatorClient.proposeBeaconBlock(ctx, &ethpb.GenericSignedBeaconBlock{
Block: generateSignedPhase0Block(),
})
assert.ErrorContains(t, "failed to submit block via JSON fallback", err)
}
func TestProposeBeaconBlock_SSZFails_Non406_NoFallback(t *testing.T) {
testCases := []struct {
name string
@@ -712,41 +676,3 @@ func TestProposeBeaconBlock_SSZFails_Non406_NoFallback(t *testing.T) {
})
}
}

type badHashable struct{}

func (badHashable) HashTreeRoot() ([32]byte, error) {
return [32]byte{}, errors.New("hash root error")
}

type badMarshaler struct{}

func (badMarshaler) MarshalSSZ() ([]byte, error) {
return nil, errors.New("marshal ssz error")
}

type okMarshaler struct{}

func (okMarshaler) MarshalSSZ() ([]byte, error) {
return []byte{1, 2, 3}, nil
}

type okHashable struct{}

func (okHashable) HashTreeRoot() ([32]byte, error) {
return [32]byte{1}, nil
}

func TestBuildBlockResult_HashTreeRootError(t *testing.T) {
_, err := buildBlockResult("phase0", false, okMarshaler{}, badHashable{}, func() ([]byte, error) {
return []byte(`{}`), nil
})
assert.ErrorContains(t, "failed to compute block root for phase0 beacon block", err)
}

func TestBuildBlockResult_MarshalSSZError(t *testing.T) {
_, err := buildBlockResult("phase0", false, badMarshaler{}, okHashable{}, func() ([]byte, error) {
return []byte(`{}`), nil
})
assert.ErrorContains(t, "failed to serialize phase0 beacon block", err)
}
@@ -9,7 +9,6 @@ go_library(
"capella_beacon_block_test_helpers.go",
"deneb_beacon_block_test_helpers.go",
"electra_beacon_block_test_helpers.go",
"fulu_beacon_block_test_helpers.go",
"phase0_beacon_block_test_helpers.go",
"test_helpers.go",
],
@@ -1,46 +0,0 @@
package test_helpers

import (
"github.com/OffchainLabs/prysm/v7/api/server/structs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func GenerateProtoFuluBeaconBlockContents() *ethpb.BeaconBlockContentsFulu {
electra := GenerateProtoElectraBeaconBlockContents()
return &ethpb.BeaconBlockContentsFulu{
Block: electra.Block,
KzgProofs: electra.KzgProofs,
Blobs: electra.Blobs,
}
}

func GenerateProtoBlindedFuluBeaconBlock() *ethpb.BlindedBeaconBlockFulu {
electra := GenerateProtoBlindedElectraBeaconBlock()
return &ethpb.BlindedBeaconBlockFulu{
Slot: electra.Slot,
ProposerIndex: electra.ProposerIndex,
ParentRoot: electra.ParentRoot,
StateRoot: electra.StateRoot,
Body: electra.Body,
}
}

func GenerateJsonFuluBeaconBlockContents() *structs.BeaconBlockContentsFulu {
electra := GenerateJsonElectraBeaconBlockContents()
return &structs.BeaconBlockContentsFulu{
Block: electra.Block,
KzgProofs: electra.KzgProofs,
Blobs: electra.Blobs,
}
}

func GenerateJsonBlindedFuluBeaconBlock() *structs.BlindedBeaconBlockFulu {
electra := GenerateJsonBlindedElectraBeaconBlock()
return &structs.BlindedBeaconBlockFulu{
Slot: electra.Slot,
ProposerIndex: electra.ProposerIndex,
ParentRoot: electra.ParentRoot,
StateRoot: electra.StateRoot,
Body: electra.Body,
}
}