Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-22 11:48:13 -05:00)

Compare commits (1 commit): poc/option ... state-desi

| Author | SHA1 | Date |
|---|---|---|
| | c1edf3fe55 | |
@@ -193,7 +193,6 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

@@ -48,7 +48,6 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

@@ -66,7 +65,6 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",

@@ -148,8 +146,6 @@ go_test(
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -323,17 +323,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {

@@ -341,7 +338,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch && !fuluAndNextEpoch {
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {
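The getPayloadAttribute hunk above changes when the proposer can be read directly from the given state: at the state's own epoch or, from Fulu onward, also at the next epoch; anything else still requires copying and advancing the state. A minimal sketch of that decision, with hypothetical types and return strings standing in for the real calls:

```go
package main

import "fmt"

type Epoch uint64

// proposerLookup describes how a payload-attribute builder could resolve the
// proposer for a target epoch; the names are illustrative, not Prysm's API.
func proposerLookup(stateIsFulu bool, stateEpoch, targetEpoch Epoch) string {
	switch {
	case targetEpoch == stateEpoch:
		// Same epoch: the shuffling is already available in the state.
		return "read proposer from the current state"
	case stateIsFulu && targetEpoch == stateEpoch+1:
		// From Fulu onward the next epoch's proposers are also known up front.
		return "read next-epoch proposer without advancing the state"
	default:
		// Pre-Fulu, or further ahead: copy the state and process slots first.
		return "copy the state and process slots up to the target slot"
	}
}

func main() {
	fmt.Println(proposerLookup(true, 10, 11))  // Fulu: next epoch is available
	fmt.Println(proposerLookup(false, 10, 11)) // pre-Fulu: must advance a copy
}
```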
@@ -1053,3 +1053,40 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
|
||||
require.Equal(t, vhs[0].String(), vh0)
|
||||
require.Equal(t, vhs[1].String(), vh1)
|
||||
}
|
||||
|
||||
func TestComputePayloadAttribute(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
}
|
||||
fcu := &fcuConfig{
|
||||
headState: st,
|
||||
proposingSlot: slot,
|
||||
headRoot: [32]byte{},
|
||||
}
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

@@ -54,53 +53,58 @@ type fcuConfig struct {
}

// sendFCU handles the logic to notify the engine of a forckhoice update
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions .
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}

// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
if fcuArgs.attributes.IsEmpty() {
return
}

if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}

// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}

if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}

// shouldOverrideFCU checks whether the incoming block is still subject to being
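The rewritten sendFCU above reduces to two early returns before notifying the engine: skip when the head did not change, and skip when attributes are already set but a proposer reorg attempt has already sent an FCU with attributes from lateBlockTasks. A small restatement of that decision as a pure function; the boolean inputs are stand-ins for the service calls shown in the diff:

```go
package main

import "fmt"

// shouldSendFCU mirrors the control flow of the new sendFCU as a pure function:
// skip when the head is unchanged, skip when attributes are present but a
// late-block reorg attempt already sent an FCU with attributes, otherwise send.
func shouldSendFCU(isNewHead, hasAttributes, wouldOverrideFCU bool) bool {
	if !isNewHead {
		return false
	}
	if hasAttributes && wouldOverrideFCU {
		return false
	}
	return true
}

func main() {
	fmt.Println(shouldSendFCU(false, true, false)) // false: head unchanged
	fmt.Println(shouldSendFCU(true, true, true))   // false: reorg path already sent FCU
	fmt.Println(shouldSendFCU(true, false, false)) // true: notify the engine
}
```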
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
|
||||
headBlock: wsb,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
|
||||
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
|
||||
require.Equal(t, true, has)
|
||||
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
|
||||
headRoot: r,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
}
|
||||
|
||||
func TestShouldOverrideFCU(t *testing.T) {
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
@@ -138,14 +136,6 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithExecProofsPool to keep track of execution proofs.
|
||||
func WithExecProofsPool(p execproofs.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ExecProofsPool = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithP2PBroadcaster to broadcast messages after appropriate processing.
|
||||
func WithP2PBroadcaster(p p2p.Accessor) Option {
|
||||
return func(s *Service) error {
|
||||
@@ -276,10 +266,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.OperationNotifier = operationNotifier
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.

@@ -30,13 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}

@@ -45,7 +43,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch <= headEpoch {
if c.Epoch == headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
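The getRecentPreState change above compares dependent roots at c.Epoch-1 rather than headEpoch-1: the head state can serve as a pre-state for checkpoint c only when the head block and the checkpoint block share the dependent root for the epoch before the checkpoint, i.e. the same shuffling. A sketch of that comparison with a hypothetical lookup in place of ForkChoiceStore.DependentRootForEpoch:

```go
package main

import (
	"errors"
	"fmt"
)

type (
	Root  [32]byte
	Epoch uint64
)

// dependentRootFn stands in for a forkchoice lookup in the shape of
// DependentRootForEpoch(root, epoch); it is not Prysm's actual signature.
type dependentRootFn func(block Root, epoch Epoch) (Root, error)

// headServesCheckpoint reports whether the head block and the checkpoint block
// share the dependent root for the epoch before the checkpoint, meaning the
// head state has the same shuffling and can act as the checkpoint's pre-state.
func headServesCheckpoint(lookup dependentRootFn, head, cpRoot Root, cpEpoch Epoch) (bool, error) {
	if cpEpoch == 0 {
		return false, errors.New("checkpoint epoch must be positive")
	}
	headDependent, err := lookup(head, cpEpoch-1)
	if err != nil {
		return false, err
	}
	targetDependent, err := lookup(cpRoot, cpEpoch-1)
	if err != nil {
		return false, err
	}
	return headDependent == targetDependent, nil
}

func main() {
	shared := Root{'d'}
	lookup := func(Root, Epoch) (Root, error) { return shared, nil }
	ok, _ := headServesCheckpoint(lookup, Root{'h'}, Root{'c'}, 2)
	fmt.Println(ok) // true: both descend from the same dependent root
}
```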
@@ -170,13 +170,12 @@ func TestService_GetRecentPreState(t *testing.T) {
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 31,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 1, Root: ckRoot}))
|
||||
@@ -198,13 +197,12 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
@@ -229,7 +227,6 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
headBlock := blk
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
@@ -238,9 +235,8 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'T'},
|
||||
block: headBlock,
|
||||
slot: 64,
|
||||
state: s,
|
||||
slot: 64,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
@@ -267,7 +263,6 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
headBlock := blk
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
@@ -275,8 +270,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'U'},
|
||||
block: headBlock,
|
||||
root: [32]byte{'T'},
|
||||
state: s,
|
||||
slot: 64,
|
||||
}
|
||||
@@ -293,13 +287,12 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
|
||||
@@ -7,8 +7,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
@@ -17,7 +15,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
@@ -69,6 +66,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}

@@ -105,16 +105,12 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}

return nil
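One side of the hunk above runs the cache refresh in a goroutine whose context is bounded by the slot deadline, so a slow update cannot outlive the slot. A minimal, self-contained sketch of that timeout-bounded background pattern; the deadline value and function names are placeholders, not Prysm's:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// slotDeadline is a placeholder; the real deadline is derived from slot timing.
const slotDeadline = 4 * time.Second

// updateCachesAsync runs a cache refresh in the background, bounded by the
// slot deadline, so block processing does not block on it.
func updateCachesAsync(parent context.Context, refresh func(context.Context) error) {
	go func() {
		ctx, cancel := context.WithTimeout(parent, slotDeadline)
		defer cancel()
		if err := refresh(ctx); err != nil {
			fmt.Println("could not update caches:", err)
		}
	}()
}

func main() {
	updateCachesAsync(context.Background(), func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // pretend this is the cache work
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	time.Sleep(50 * time.Millisecond) // give the background refresh time to finish in this demo
}
```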
@@ -299,6 +295,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||
}
|
||||
}
|
||||
arg := &fcuConfig{
|
||||
headState: preState,
|
||||
headRoot: lastBR,
|
||||
headBlock: lastB,
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
@@ -326,7 +330,6 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
|
||||
return nil
|
||||
}
|
||||
|
||||
// the caller of this function must not hold a lock in forkchoice store.
|
||||
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
|
||||
e := coreTime.CurrentEpoch(st)
|
||||
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
|
||||
@@ -356,9 +359,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
if e > 0 {
|
||||
e = e - 1
|
||||
}
|
||||
s.ForkChoicer().RLock()
|
||||
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
|
||||
s.ForkChoicer().RUnlock()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update proposer index state-root map")
|
||||
return nil
|
||||
@@ -371,7 +372,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
}
|
||||
|
||||
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
|
||||
// caches. The caller of this function must not hold a lock in forkchoice store.
|
||||
// caches.
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||
defer span.End()
|
||||
@@ -665,17 +666,10 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}

root, blockVersion := roBlock.Root(), roBlock.Version()
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
if err := s.areExecutionProofsAvailable(ctx, root); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}

if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}

return nil
return s.areDataColumnsAvailable(ctx, root, block)
}

if blockVersion >= version.Deneb {
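isDataAvailable above dispatches the availability check on the block's fork version: Fulu and later blocks wait on data columns (one side of the hunk also gates on execution proofs), while Deneb blocks wait on blob sidecars. A small sketch of that dispatch; the constants and callbacks are placeholders for the service methods in the diff:

```go
package main

import "fmt"

// Fork versions in ascending order; the concrete values are illustrative.
const (
	versionDeneb = iota + 4
	versionElectra
	versionFulu
)

// checkDataAvailability mirrors the structure of isDataAvailable: data columns
// for Fulu and later, blob sidecars for Deneb and later, nothing before that.
func checkDataAvailability(blockVersion int, columnsAvailable, blobsAvailable func() error) error {
	if blockVersion >= versionFulu {
		return columnsAvailable()
	}
	if blockVersion >= versionDeneb {
		return blobsAvailable()
	}
	return nil
}

func main() {
	ok := func() error { return nil }
	fmt.Println(checkDataAvailability(versionFulu, ok, ok))  // waits on data columns
	fmt.Println(checkDataAvailability(versionDeneb, ok, ok)) // waits on blob sidecars
}
```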
@@ -685,67 +679,6 @@ func (s *Service) isDataAvailable(
|
||||
return nil
|
||||
}
|
||||
|
||||
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
|
||||
// or an error or context cancellation occurs.
|
||||
// This check is only performed for lightweight verifier nodes that need zkVM proofs
|
||||
// to validate block execution (nodes without execution layer + proof generation capability).
|
||||
// A nil result means that the data availability check is successful.
|
||||
func (s *Service) areExecutionProofsAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte) error {
|
||||
// Return early if zkVM features are disabled (no need to check for execution proofs),
|
||||
// or if the generation proof is enabled (we will generate proofs ourselves).
|
||||
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
requiredProofCount := params.BeaconConfig().MinProofsRequired
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"requiredProofCount": requiredProofCount,
|
||||
})
|
||||
|
||||
// Subscribe to execution proof received events.
|
||||
eventsChan := make(chan *feed.Event, 1)
|
||||
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Return early if we already have enough proofs.
|
||||
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
|
||||
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Some proofs are missing; wait for them.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case event := <-eventsChan:
|
||||
if event.Type != operation.ExecutionProofReceived {
|
||||
continue
|
||||
}
|
||||
|
||||
proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
|
||||
if !ok {
|
||||
log.Error("Could not cast event data to ExecutionProofReceivedData")
|
||||
continue
|
||||
}
|
||||
|
||||
proof := proofWrapper.ExecutionProof
|
||||
|
||||
// Skip if the proof is for a different block.
|
||||
if bytesutil.ToBytes32(proof.BlockRoot) != blockRoot {
|
||||
continue
|
||||
}
|
||||
|
||||
// Return if we have enough proofs.
|
||||
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
|
||||
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
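The areExecutionProofsAvailable helper in the hunk above subscribes to the operation feed and blocks until enough proofs for the block root have arrived or the context is cancelled. A generic sketch of that wait-loop shape using a plain notification channel instead of the event feed; every name here is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// waitForCount blocks until count() reaches the required threshold, re-checking
// whenever a notification arrives, or until the context is cancelled. This is
// only the shape of the proof wait loop, not its real API.
func waitForCount(ctx context.Context, required uint64, count func() uint64, notify <-chan struct{}) error {
	if count() >= required {
		return nil // already have enough, no need to wait
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-notify:
			if count() >= required {
				return nil
			}
		}
	}
}

func main() {
	var proofs atomic.Uint64
	notify := make(chan struct{}, 1)
	go func() { proofs.Store(2); notify <- struct{}{} }() // simulate proofs arriving
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForCount(ctx, 2, proofs.Load, notify))
}
```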
|
||||
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
func (s *Service) areDataColumnsAvailable(
|
||||
@@ -979,6 +912,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if currentSlot == s.HeadSlot() {
|
||||
return
|
||||
}
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
// return early if we are in init sync
|
||||
if !s.inRegularSync() {
|
||||
return
|
||||
@@ -991,32 +926,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
// Before Fulu we need to process the next slot to find out if we are proposing.
|
||||
if lastState.Version() < version.Fulu {
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
} else {
|
||||
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
|
||||
defer func() {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
}()
|
||||
}()
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
// return early if we already started building a block for the current
|
||||
// head root
|
||||
@@ -1046,8 +963,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
headBlock: headBlock,
|
||||
attributes: attribute,
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
|
||||
@@ -42,8 +42,14 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {

@@ -167,19 +173,26 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
log.WithError(err).Error("Could not update next slot state cache")
return
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return
return nil
}
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}

@@ -189,6 +202,20 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
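With the change above, getFCUArgs only computes payload attributes during regular sync and only for blocks that arrive after the attestation voting window; early blocks leave attributes to the deferred second FCU call. A compact restatement of that gate, with the voting window reduced to a slot-time comparison (the cutoff constant here is an assumption, not the configured value):

```go
package main

import (
	"fmt"
	"time"
)

// votingWindow approximates the attestation voting window after the slot start
// (roughly a third of the slot on mainnet); treat the exact value as an assumption.
const votingWindow = 4 * time.Second

// shouldComputeAttributes mirrors the gate in the diff: payload attributes are
// only prepared during regular sync and only when the block arrived late,
// after the attestation voting window for its slot.
func shouldComputeAttributes(inRegularSync bool, slotStart, received time.Time) bool {
	if !inRegularSync {
		return false
	}
	return received.Sub(slotStart) > votingWindow
}

func main() {
	start := time.Now()
	fmt.Println(shouldComputeAttributes(true, start, start.Add(2*time.Second))) // early block: defer attributes
	fmt.Println(shouldComputeAttributes(true, start, start.Add(6*time.Second))) // late block: compute them now
}
```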
@@ -13,8 +13,6 @@ import (
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
@@ -740,9 +738,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -792,9 +788,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -822,9 +816,25 @@ func TestOnBlock_NilBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
signed := &consensusblocks.SignedBeaconBlock{}
|
||||
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
blk.Signature = []byte{'a'} // Mutate the signature.
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -856,9 +866,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1331,9 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1345,9 +1351,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1359,9 +1363,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1373,9 +1375,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1400,6 +1400,197 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
// INVALID from FCU, with LVH block 17. No head is viable. We check
|
||||
// that the node is optimistic and that we can actually import a block on top of
|
||||
// 17 and recover.
|
||||
func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SlotsPerEpoch = 6
|
||||
config.AltairForkEpoch = 1
|
||||
config.BellatrixForkEpoch = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
|
||||
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), jc.Epoch)
|
||||
|
||||
// import a block that justifies the second epoch
|
||||
driftGenesisTime(service, 18, 0)
|
||||
validHeadState, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
firstInvalidRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
|
||||
sjc := validHeadState.CurrentJustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 19, 0)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
|
||||
// store's headroot is the previous head (since the invalid block did
|
||||
// not finish importing) one and that the node is optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
headRoot, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// import another block based on the last valid head state
|
||||
mockEngine = &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 20, 0)
|
||||
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, jc.Epoch, sjc.Epoch)
|
||||
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
@@ -1451,9 +1642,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1473,9 +1662,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
@@ -1496,9 +1684,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1521,9 +1708,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1533,10 +1718,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isBlock18OptimisticAfterImport)
|
||||
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
|
||||
@@ -1587,9 +1768,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1656,9 +1835,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1679,9 +1856,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -1701,9 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1732,9 +1906,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1802,9 +1975,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -1829,9 +2000,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -1859,9 +2028,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -1905,6 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
t.Log(i)
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -1921,9 +2089,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1943,9 +2109,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -1965,9 +2130,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1998,9 +2161,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2121,9 +2282,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2189,9 +2348,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2474,10 +2631,7 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2578,9 +2732,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2614,10 +2766,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -3003,113 +3152,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NotNil(t, err)
})

t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()

// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)

// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}

testParams := testIsAvailableParams{
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}

ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)

// Insert MinProofsRequired execution proofs into the pool
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
}

roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)

err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})

t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()

// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)

// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}

testParams := testIsAvailableParams{
options: []Option{
WithOperationNotifier(&mock.MockOperationNotifier{}),
},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}

ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)

// Goroutine to send execution proofs after data columns are available
go func() {
// Wait a bit to simulate async proof arrival
time.Sleep(50 * time.Millisecond)

// Send ExecutionProofReceived events
opfeed := service.cfg.OperationNotifier.OperationFeed()
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)

opfeed.Send(&feed.Event{
Type: operation.ExecutionProofReceived,
Data: &operation.ExecutionProofReceivedData{
ExecutionProof: proof,
},
})
}
}()

ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()

roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
}

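The two removed subtests above exercise a single availability pattern: isDataAvailable returns immediately when the proof pool already holds MinProofsRequired proofs, and otherwise waits for ExecutionProofReceived events until the context expires. A minimal, self-contained sketch of that waiting loop (hypothetical names, not the Prysm implementation):

package main

import (
	"context"
	"errors"
	"sync/atomic"
	"time"
)

// waitForProofs returns nil once count() reports at least min proofs,
// re-checking on every notification, or an error when ctx expires.
func waitForProofs(ctx context.Context, min int64, count func() int64, notify <-chan struct{}) error {
	for {
		if count() >= min {
			return nil
		}
		select {
		case <-notify:
			// A new proof arrived; loop and re-check the count.
		case <-ctx.Done():
			return errors.New("timed out waiting for execution proofs")
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var proofs atomic.Int64
	notify := make(chan struct{}, 3)

	// Simulate three proofs arriving asynchronously, as in the removed test.
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(50 * time.Millisecond)
			proofs.Add(1)
			notify <- struct{}{}
		}
	}()

	if err := waitForProofs(ctx, 3, proofs.Load, notify); err != nil {
		panic(err)
	}
}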
// Test_postBlockProcess_EventSending tests that block processed events are only sent
|
||||
@@ -3220,9 +3262,7 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -66,54 +66,52 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}

if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}

// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}

reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}

continue
}

s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()

s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}

reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()

s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
}()
}

// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
|
||||
@@ -158,15 +156,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
}
|
||||
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -117,9 +117,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -179,9 +177,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
@@ -25,7 +24,6 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
@@ -87,11 +85,9 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
ExecProofsPool execproofs.PoolManager
|
||||
P2P p2p.Accessor
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
OperationNotifier operation.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
AttService *attestations.Service
|
||||
StateGen *stategen.State
|
||||
@@ -215,9 +211,7 @@ func (s *Service) Start() {
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}

go s.spawnProcessAttestationsRoutine()
go s.spawnFinalizedProofsPruningRoutine()
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
}

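In the Start() hunk above, the call changes from `go s.spawnProcessAttestationsRoutine()` to a plain call because the routine (rewritten earlier in this diff) now launches its own goroutine internally. A tiny, self-contained illustration of that pattern, using hypothetical names:

package main

import "fmt"

type service struct {
	done chan struct{}
}

// spawnWorker launches its own goroutine, so callers invoke it synchronously
// instead of prefixing the call with `go`.
func (s *service) spawnWorker() {
	go func() {
		defer close(s.done)
		// ... long-running work would happen here ...
	}()
}

func main() {
	s := &service{done: make(chan struct{})}
	s.spawnWorker() // no `go` at the call site
	<-s.done
	fmt.Println("worker exited")
}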
@@ -573,46 +567,3 @@ func fuluForkSlot() (primitives.Slot, error) {

return forkFuluSlot, nil
}

// spawnFinalizedProofsPruningRoutine prunes execution proofs pool on every epoch.
// It removes proofs older than the finalized checkpoint to prevent unbounded
// memory growth.
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
func (s *Service) spawnFinalizedProofsPruningRoutine() {
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
defer ticker.Done()

for {
select {
case slot := <-ticker.C():
// Only prune at the start of each epoch
if !slots.IsEpochStart(slot) {
continue
}

finalizedCheckpoint := s.FinalizedCheckpt()
if finalizedCheckpoint == nil {
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
continue
}

finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
log.WithError(err).Error("Could not get finalized slot")
continue
}

// Prune proofs older than finalized slot
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
log.WithFields(logrus.Fields{
"prunedCount": count,
"finalizedSlot": finalizedSlot,
}).Debug("Pruned finalized execution proofs")
}

case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}

@@ -290,3 +290,52 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -122,6 +122,24 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
if bal > 0 {
continue
}


@@ -46,9 +46,6 @@ const (

// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12

// ExecutionProofReceived is sent after an execution proof object has been received from gossip or rpc.
ExecutionProofReceived = 13
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,11 +77,6 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}

// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *ethpb.ExecutionProof
}

// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

@@ -152,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
log.WithError(err).Error("Could not update committee cache")
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
|
||||
return indices, nil
|
||||
|
||||
@@ -5,20 +5,10 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
|
||||
},
|
||||
)
|
||||
|
||||
cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cells_and_proofs_from_structured_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute cells and proofs from structured computation.",
|
||||
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
|
||||
},
|
||||
)
|
||||
var dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -3,7 +3,6 @@ package peerdas
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -297,42 +296,32 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
return nil, nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
|
||||
for i, blob := range blobs {
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
proofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = proofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
cellsPerBlob = append(cellsPerBlob, cells)
|
||||
proofsPerBlob = append(proofsPerBlob, proofs)
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
@@ -340,55 +329,40 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
|
||||
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
|
||||
|
||||
for i, blobAndProof := range blobsAndProofs {
|
||||
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
|
||||
for _, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, nil, ErrNilBlobAndProof
|
||||
}
|
||||
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return nil, nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return nil, nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = kzgProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
cellsPerBlob = append(cellsPerBlob, cells)
|
||||
proofsPerBlob = append(proofsPerBlob, kzgProofs)
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
|
||||
@@ -182,6 +182,12 @@ func ProcessBlockNoVerifyAnySig(
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
sig := signed.Signature()
|
||||
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
|
||||
}
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
|
||||
if err != nil {
|
||||
@@ -195,7 +201,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
|
||||
// Merge beacon block, randao and attestations signatures into a set.
|
||||
set := bls.NewSet()
|
||||
set.Join(rSet).Join(aSet)
|
||||
set.Join(bSet).Join(rSet).Join(aSet)
|
||||
|
||||
if blk.Version() >= version.Capella {
|
||||
changes, err := signed.Block().Body().BLSToExecutionChanges()
|
||||
|
||||
@@ -157,8 +157,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "randao signature", set.Descriptions[0])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[1])
|
||||
assert.Equal(t, "block signature", set.Descriptions[0])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[1])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[2])
|
||||
}
|
||||
|
||||
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
|
||||
|
||||
@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
|
||||
|
||||
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
|
||||
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
|
||||
|
||||
if oldestSlotFlagPtr != nil {
|
||||
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
|
||||
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
|
||||
if oldestEpoch < sn.blockRetention {
|
||||
sn.validOldestSlotPtr = oldestSlotFlagPtr
|
||||
} else {
|
||||
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
|
||||
|
||||
@@ -128,9 +128,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
|
||||
|
||||
currentSlot := primitives.Slot(10000)
|
||||
currentFunc := func() primitives.Slot { return currentSlot }
|
||||
@@ -144,7 +141,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedCol primitives.Epoch
|
||||
name string
|
||||
input SyncNeeds
|
||||
current func() primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "basic initialization with no flags",
|
||||
@@ -178,13 +174,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(10)
|
||||
return &slot
|
||||
}(),
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
|
||||
@@ -214,9 +210,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "both blob retention flag and oldest slot set",
|
||||
blobRetentionFlag: minBlobEpochs + 5,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(100)
|
||||
return &slot
|
||||
@@ -239,27 +232,16 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedBlob: 5000,
|
||||
expectedCol: 5000,
|
||||
},
|
||||
{
|
||||
name: "regression for deneb start",
|
||||
blobRetentionFlag: 8212500,
|
||||
expectValidOldest: true,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
expectedBlob: 8212500,
|
||||
expectedCol: 8212500,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.current == nil {
|
||||
tc.current = currentFunc
|
||||
}
|
||||
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that current, deneb, fulu are set correctly
|
||||
require.Equal(t, currentSlot, result.current())
|
||||
|
||||
// Check retention calculations
|
||||
require.Equal(t, tc.expectedBlob, result.blobRetention)
|
||||
require.Equal(t, tc.expectedCol, result.colRetention)
|
||||
|
||||
@@ -38,7 +38,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -186,162 +185,73 @@ func (dcs *DataColumnStorage) WarmCache() {
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
|
||||
continue
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := listEpochFiles(dcs.fs, epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
epochHighest, err := dcs.processEpochFiles(files)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
|
||||
}
|
||||
|
||||
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
|
||||
// Open the data column filesystem file.
|
||||
f, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := f.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
// Prune the cache and the filesystem.
|
||||
dcs.prune()
|
||||
|
||||
totalElapsed := time.Since(start)
|
||||
|
||||
// Log summary
|
||||
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// listEpochFiles lists all .sszs files in an epoch directory.
|
||||
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read epoch directory")
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
|
||||
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
for _, filePath := range files {
|
||||
eg.Go(func() error {
|
||||
epoch, err := dcs.processFile(filePath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return highestEpoch, err
|
||||
}
|
||||
|
||||
return highestEpoch, nil
|
||||
}
|
||||
|
||||
// processFile processes a single .sszs file.
|
||||
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "extract file metadata")
|
||||
}
|
||||
|
||||
// Open the file (each goroutine gets its own FD)
|
||||
f, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "open file")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read metadata
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "read metadata")
|
||||
}
|
||||
|
||||
// Extract indices
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return fileMetadata.epoch, nil // No indices, skip
|
||||
}
|
||||
|
||||
// Build ident and set in cache (thread-safe)
|
||||
dataColumnsIdent := DataColumnsIdent{
|
||||
Root: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
Indices: indices,
|
||||
}
|
||||
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return 0, errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return fileMetadata.epoch, nil
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary.
|
||||
@@ -605,11 +515,6 @@ func (dcs *DataColumnStorage) Clear() error {
|
||||
|
||||
// prune clean the cache, the filesystem and mutexes.
|
||||
func (dcs *DataColumnStorage) prune() {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}()
|
||||
|
||||
highestStoredEpoch := dcs.cache.HighestEpoch()
|
||||
|
||||
// Check if we need to prune.
|
||||
@@ -717,9 +622,6 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
// Create the SSZ encoded data column sidecars.
|
||||
var sszEncodedDataColumnSidecars []byte
|
||||
|
||||
// Initialize the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount := uint8(0)
|
||||
|
||||
for {
|
||||
dataColumnSidecars := pullChan(inputDataColumnSidecars)
|
||||
if len(dataColumnSidecars) == 0 {
|
||||
@@ -766,9 +668,6 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
return errors.Wrap(err, "set index")
|
||||
}
|
||||
|
||||
// Increment the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount++
|
||||
|
||||
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
@@ -793,12 +692,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync")
|
||||
}
|
||||
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
dataColumnBatchStoreCount.Observe(float64(storedCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -912,14 +808,10 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync")
|
||||
}
|
||||
|
||||
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
dataColumnBatchStoreCount.Observe(float64(storedCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -36,15 +36,16 @@ var (
|
||||
})
|
||||
|
||||
// Data columns
|
||||
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
|
||||
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_save_latency",
|
||||
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
|
||||
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
|
||||
Buckets: dataColumnBuckets,
|
||||
})
|
||||
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_get_latency",
|
||||
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
|
||||
Buckets: []float64{3, 5, 7, 9, 11, 13},
|
||||
Buckets: dataColumnBuckets,
|
||||
})
|
||||
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "data_column_pruned",
|
||||
@@ -58,16 +59,4 @@ var (
|
||||
Name: "data_column_disk_count",
|
||||
Help: "Approximate number of data columns in storage",
|
||||
})
|
||||
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_file_sync_latency",
|
||||
Help: "Latency of sync operations when saving data columns in milliseconds",
|
||||
})
|
||||
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_batch_store_count",
|
||||
Help: "Number of data columns stored in a batch",
|
||||
})
|
||||
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_prune_latency",
|
||||
Help: "Latency of data column prune operations in milliseconds",
|
||||
})
|
||||
)
|
||||
|
||||
@@ -532,19 +532,12 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
|
||||
defer span.End()
|
||||
|
||||
start := time.Now()
|
||||
|
||||
if !s.capabilityCache.has(GetBlobsV2) {
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
|
||||
|
||||
if len(result) != 0 {
|
||||
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}
|
||||
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -27,13 +27,6 @@ var (
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
getBlobsV2Latency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_blobs_v2_latency_milliseconds",
|
||||
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "execution_parse_error_count",
|
||||
Help: "The number of errors that occurred while parsing execution payload",
|
||||
|
||||
@@ -37,7 +37,6 @@ go_library(
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
|
||||
@@ -40,7 +40,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/node/registration"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -103,7 +102,6 @@ type BeaconNode struct {
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
execProofsPool execproofs.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
@@ -158,7 +156,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
slashingsPool: slashings.NewPool(),
|
||||
syncCommitteePool: synccommittee.NewPool(),
|
||||
blsToExecPool: blstoexec.NewPool(),
|
||||
execProofsPool: execproofs.NewPool(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
payloadIDCache: cache.NewPayloadIDCache(),
|
||||
slasherBlockHeadersFeed: new(event.Feed),
|
||||
@@ -740,7 +737,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithExitPool(b.exitPool),
|
||||
blockchain.WithSlashingPool(b.slashingsPool),
|
||||
blockchain.WithBLSToExecPool(b.blsToExecPool),
|
||||
blockchain.WithExecProofsPool(b.execProofsPool),
|
||||
blockchain.WithP2PBroadcaster(b.fetchP2P()),
|
||||
blockchain.WithStateNotifier(b),
|
||||
blockchain.WithAttestationService(attService),
|
||||
@@ -756,7 +752,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
blockchain.WithOperationNotifier(b),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -832,7 +827,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithSlashingPool(b.slashingsPool),
|
||||
regularsync.WithSyncCommsPool(b.syncCommitteePool),
|
||||
regularsync.WithBlsToExecPool(b.blsToExecPool),
|
||||
regularsync.WithExecProofPool(b.execProofsPool),
|
||||
regularsync.WithStateGen(b.stateGen),
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["pool.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,174 +0,0 @@
|
||||
package execproofs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
// ProofKey uniquely identifies an execution proof by block root and proof type.
|
||||
type ProofKey struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
ProofId primitives.ExecutionProofId
|
||||
}
|
||||
|
||||
// String returns a string representation for logging.
|
||||
func (k ProofKey) String() string {
|
||||
return fmt.Sprintf("root=%#x,proofId=%d", k.Root, k.ProofId)
|
||||
}
|
||||
|
||||
var (
|
||||
execProofInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "exec_proof_pool_total",
|
||||
Help: "The number of execution proofs in the operation pool.",
|
||||
})
|
||||
)
|
||||
|
||||
var _ PoolManager = (*ExecProofPool)(nil)
|
||||
|
||||
// PoolManager maintains execution proofs received via gossip.
// These proofs are used for data availability checks when importing blocks.
// Lightweight verifier nodes need a minimum number of proofs from different zkVM types
// to verify block execution correctness.
type PoolManager interface {
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
Insert(executionProof *ethpb.ExecutionProof)

// Get returns a copy of all proofs for a specific block root
Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof

// Ids returns the list of (unique) proof types available for a specific block root
Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId

// Count counts the number of proofs for a specific block root
Count(blockRoot [fieldparams.RootLength]byte) uint64

// Exists checks if a proof exists for the given block root and proof ID
Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool

// PruneUpTo removes proofs older than the target slot
PruneUpTo(targetSlot primitives.Slot) int
}

// ExecProofPool is a concrete implementation of the PoolManager interface.
type ExecProofPool struct {
lock sync.RWMutex
m map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof
}

// NewPool returns an initialized pool.
func NewPool() *ExecProofPool {
return &ExecProofPool{
m: make(map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof),
}
}

// Insert inserts a proof into the pool.
|
||||
// If a proof with the same block root and proof ID already exists, it is not added again.
|
||||
func (p *ExecProofPool) Insert(proof *ethpb.ExecutionProof) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(proof.BlockRoot)
|
||||
|
||||
// Create the inner map if it doesn't exist
|
||||
if p.m[blockRoot] == nil {
|
||||
p.m[blockRoot] = make(map[primitives.ExecutionProofId]*ethpb.ExecutionProof)
|
||||
}
|
||||
|
||||
// Check if proof already exists
|
||||
if _, exists := p.m[blockRoot][proof.ProofId]; exists {
|
||||
return
|
||||
}
|
||||
|
||||
// Insert new proof
|
||||
p.m[blockRoot][proof.ProofId] = proof
|
||||
execProofInPoolTotal.Inc()
|
||||
}
|
||||
|
||||
// Get returns a copy of all proofs for a specific block root
|
||||
func (p *ExecProofPool) Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
proofsByType, exists := p.m[blockRoot]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]*ethpb.ExecutionProof, 0, len(proofsByType))
|
||||
for _, proof := range proofsByType {
|
||||
result = append(result, proof.Copy())
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (p *ExecProofPool) Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
proofById, exists := p.m[blockRoot]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
ids := make([]primitives.ExecutionProofId, 0, len(proofById))
|
||||
for id := range proofById {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count counts the number of proofs for a specific block root
|
||||
func (p *ExecProofPool) Count(blockRoot [fieldparams.RootLength]byte) uint64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return uint64(len(p.m[blockRoot]))
|
||||
}
|
||||
|
||||
// Exists checks if a proof exists for the given block root and proof ID
|
||||
func (p *ExecProofPool) Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
proofsByType, exists := p.m[blockRoot]
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
|
||||
_, exists = proofsByType[proofId]
|
||||
return exists
|
||||
}
|
||||
|
||||
// PruneUpTo removes proofs older than the given slot
|
||||
func (p *ExecProofPool) PruneUpTo(targetSlot primitives.Slot) int {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
pruned := 0
|
||||
for blockRoot, proofsByType := range p.m {
|
||||
for proofId, proof := range proofsByType {
|
||||
if proof.Slot < targetSlot {
|
||||
delete(proofsByType, proofId)
|
||||
execProofInPoolTotal.Dec()
|
||||
pruned++
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up empty inner maps
|
||||
if len(proofsByType) == 0 {
|
||||
delete(p.m, blockRoot)
|
||||
}
|
||||
}
|
||||
|
||||
return pruned
|
||||
}
|
||||
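For context on the pool API removed above, here is a minimal usage sketch; it is not part of the diff, it only exercises the PoolManager methods shown, and minProofsRequired plus the inputs are illustrative placeholders rather than spec values.

// Hypothetical sketch of how a block importer might consult the (removed) pool.
package execproofs_test

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

const minProofsRequired = 2 // illustrative threshold, not a spec constant

// hasEnoughProofs shows the intended flow: gossip inserts proofs, the importer
// counts distinct proof types for a block root before treating it as verified.
func hasEnoughProofs(pool execproofs.PoolManager, incoming []*ethpb.ExecutionProof, root [fieldparams.RootLength]byte, finalizedSlot primitives.Slot) bool {
	for _, p := range incoming {
		pool.Insert(p) // duplicate (root, proofId) pairs are ignored by the pool
	}
	// Get returns copies, so callers may retain the returned slice safely.
	_ = pool.Get(root)
	// Drop proofs from slots older than the finalized slot to bound memory.
	_ = pool.PruneUpTo(finalizedSlot)
	return pool.Count(root) >= minProofsRequired
}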
@@ -165,7 +165,6 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -589,11 +589,6 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}

if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}

localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

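The zkVM advertisement removed above is just a boolean ENR entry gated by the PoC's EnableZkvm feature flag. A hedged sketch of that pattern, reusing only the calls that appear in the hunk (enr.WithEntry, LocalNode.Set, features.Get); the key parameter stands in for zkvmEnabledKeyEnrKey and the package placement is illustrative:

package p2p // illustrative placement only

import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"

	"github.com/OffchainLabs/prysm/v7/config/features"
)

// advertiseZkvm sets a boolean capability entry on our own ENR when the PoC
// feature flag is on. Peers treat a missing key as "not advertised".
func advertiseZkvm(localNode *enode.LocalNode, key string) {
	if !features.Get().EnableZkvm {
		return // nothing written to the record
	}
	localNode.Set(enr.WithEntry(key, true))
}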
@@ -25,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -244,19 +243,12 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -281,15 +273,6 @@ func TestCreateLocalNode(t *testing.T) {

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}

// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -365,14 +348,6 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)

// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}

})
}
}

@@ -52,9 +52,6 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05

// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -148,8 +145,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -515,28 +510,6 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}

func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: executionProofWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}

func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,

@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}

// GossipTopicMappings is a function to return the assigned data type

@@ -602,33 +602,6 @@ func (p *Status) All() []peer.ID {
return pids
}

// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()

peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState != Connected {
continue
}
if peerData.Enr == nil {
continue
}

var enabled bool
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
if err := peerData.Enr.Load(entry); err != nil {
continue
}

if enabled {
peers = append(peers, pid)
}
}
return peers
}

// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
p.store.Lock()

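A hedged sketch of how a caller might have used the removed ZkvmEnabledPeers helper to pick targets for execution-proof requests; the function name, cap, and package placement are illustrative only, and no request-sending API is assumed.

package sync // illustrative placement only

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
	"github.com/libp2p/go-libp2p/core/peer"
)

// proofRequestCandidates picks up to max connected peers that advertise zkVM
// support, as reported by the ZkvmEnabledPeers helper shown above.
func proofRequestCandidates(status *peers.Status, max int) []peer.ID {
	candidates := status.ZkvmEnabledPeers()
	if len(candidates) > max {
		candidates = candidates[:max]
	}
	return candidates
}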
@@ -1341,75 +1341,3 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
p.SetConnectionState(id, state)
return id
}

func TestZkvmEnabledPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 1,
},
},
})

// Create peer 1: Connected, zkVM enabled
pid1 := addPeer(t, p, peers.Connected)
record1 := new(enr.Record)
zkvmEnabled := true
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record1, pid1, nil, network.DirOutbound)
p.SetConnectionState(pid1, peers.Connected)

// Create peer 2: Connected, zkVM disabled
pid2 := addPeer(t, p, peers.Connected)
record2 := new(enr.Record)
zkvmDisabled := false
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
p.Add(record2, pid2, nil, network.DirOutbound)
p.SetConnectionState(pid2, peers.Connected)

// Create peer 3: Connected, zkVM enabled
pid3 := addPeer(t, p, peers.Connected)
record3 := new(enr.Record)
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record3, pid3, nil, network.DirOutbound)
p.SetConnectionState(pid3, peers.Connected)

// Create peer 4: Disconnected, zkVM enabled (should not be included)
pid4 := addPeer(t, p, peers.Disconnected)
record4 := new(enr.Record)
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record4, pid4, nil, network.DirOutbound)
p.SetConnectionState(pid4, peers.Disconnected)

// Create peer 5: Connected, no ENR (should not be included)
pid5 := addPeer(t, p, peers.Connected)
p.Add(nil, pid5, nil, network.DirOutbound)
p.SetConnectionState(pid5, peers.Connected)

// Create peer 6: Connected, no zkVM key in ENR (should not be included)
pid6 := addPeer(t, p, peers.Connected)
record6 := new(enr.Record)
record6.Set(enr.WithEntry("other_key", "other_value"))
p.Add(record6, pid6, nil, network.DirOutbound)
p.SetConnectionState(pid6, peers.Connected)

// Get zkVM enabled peers
zkvmPeers := p.ZkvmEnabledPeers()

// Should return only pid1 and pid3 (connected peers with zkVM enabled)
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")

// Verify the returned peers are correct
zkvmPeerMap := make(map[peer.ID]bool)
for _, pid := range zkvmPeers {
zkvmPeerMap[pid] = true
}

assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

@@ -67,9 +67,6 @@ const (

// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"

// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
ExecutionProofsByRootName = "/execution_proofs_by_root"
)

const (
@@ -109,9 +106,6 @@ const (
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1

// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -176,9 +170,6 @@ var (

// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},

// ExecutionProofsByRoot v1 Message
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}

// Maps all registered protocol prefixes.
@@ -202,7 +193,6 @@ var (
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
ExecutionProofsByRootName: true,
}

// Maps all the RPC messages which are to be updated in altair.

@@ -36,7 +36,6 @@ var (
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
)

// The value used with the subnet, in order

@@ -46,8 +46,6 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipExecutionProofMessage is the name for the execution proof message type.
GossipExecutionProofMessage = "execution_proof"

// Topic Formats
//
@@ -77,8 +75,6 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // no "_%d": the PoC only has one global topic
)

// topic is a struct representing a single gossipsub topic.
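To illustrate the last point, a hedged sketch of how the single global execution-proof topic would be joined for a given fork digest. It assumes GossipProtocolAndDigest contributes the %x fork-digest verb, as it does for the other topic formats, and that the network encoder supplies the usual protocol suffix; the function and its placement are illustrative only.

package sync // illustrative placement only

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
)

// executionProofTopic joins the removed format with a fork digest. Unlike the
// data column format there is no subnet index to substitute.
func executionProofTopic(forkDigest [4]byte, enc encoder.NetworkEncoding) string {
	return fmt.Sprintf(p2p.ExecutionProofSubnetTopicFormat, forkDigest) + enc.ProtocolSuffix()
}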
@@ -162,7 +158,6 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(fulu, future, empty, GossipExecutionProofMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

@@ -204,9 +204,6 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
}

// Reset our light client finality update map.
@@ -226,8 +223,5 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
}
}

@@ -14,7 +14,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
|
||||
@@ -958,13 +957,6 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
}
|
||||
}
|
||||
}
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash block")
|
||||
}
|
||||
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not verify block signature")
|
||||
}
|
||||
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
|
||||
@@ -130,10 +130,6 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
@@ -242,14 +238,22 @@ func (s *Server) handleAttestationsElectra(
|
||||
},
|
||||
})
|
||||
|
||||
// Broadcast first using CommitteeId directly (fast path)
|
||||
// This matches gRPC behavior and avoids blocking on state fetching
|
||||
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
|
||||
}
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
wantedEpoch := slots.ToEpoch(att.Data.Slot)
|
||||
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get head validator indices")
|
||||
}
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
|
||||
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
|
||||
Index: i,
|
||||
@@ -260,35 +264,17 @@ func (s *Server) handleAttestationsElectra(
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Save to pool after broadcast (slow path - requires state fetching)
|
||||
// Run in goroutine to avoid blocking the HTTP response
|
||||
go func() {
|
||||
for _, singleAtt := range validAttestations {
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get target state for attestation")
|
||||
continue
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get committee for attestation")
|
||||
continue
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if len(failedBroadcasts) > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -484,10 +470,6 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
var req structs.SubmitSyncCommitteeSignaturesRequest
|
||||
err := json.NewDecoder(r.Body).Decode(&req.Data)
|
||||
switch {
|
||||
@@ -729,7 +711,6 @@ func (s *Server) SubmitAttesterSlashingsV2(w http.ResponseWriter, r *http.Reques
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
v, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
|
||||
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -623,8 +622,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
AttestationStateFetcher: chainService,
|
||||
}
|
||||
@@ -657,7 +654,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -677,7 +673,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("phase0 att post electra", func(t *testing.T) {
|
||||
@@ -798,7 +793,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -818,7 +812,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
@@ -868,27 +861,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
|
||||
})
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleAtt)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttestationsV2(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListVoluntaryExits(t *testing.T) {
|
||||
@@ -1085,19 +1057,14 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
|
||||
t.Run("single", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: chainService,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1122,19 +1089,14 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: chainService,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1158,18 +1120,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: chainService,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1192,13 +1149,7 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
|
||||
})
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
s := &Server{}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[]")
|
||||
@@ -1215,13 +1166,7 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
s := &Server{}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -1234,26 +1179,6 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleSyncCommitteeMsg)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitSyncCommitteeSignatures(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListBLSToExecutionChanges(t *testing.T) {
|
||||
@@ -2187,33 +2112,6 @@ func TestSubmitAttesterSlashingsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid attester slashing", e.Message)
|
||||
})
|
||||
|
||||
t.Run("missing-version-header", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidAttesterSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
// Intentionally do not set api.VersionHeader to verify missing header handling.
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, api.VersionHeader+" header is required", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
|
||||
@@ -654,10 +654,6 @@ func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (s
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func (m *futureSyncMockFetcher) StateByEpoch(context.Context, primitives.Epoch) (state.BeaconState, error) {
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func TestGetSyncCommittees_Future(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -40,7 +40,6 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
|
||||
Data: data,
|
||||
})
|
||||
return
|
||||
}
|
||||
previous := schedule[0]
|
||||
for _, entry := range schedule {
|
||||
|
||||
@@ -169,11 +169,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.BlobsidecarSubnetCountElectra = 102
|
||||
config.SyncMessageDueBPS = 103
|
||||
|
||||
// EIP-8025
|
||||
config.MaxProofDataBytes = 200
|
||||
config.MinEpochsForExecutionProofRequests = 201
|
||||
config.MinProofsRequired = 202
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
config.DomainBeaconProposer = dbp
|
||||
@@ -210,7 +205,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 178, len(data))
|
||||
assert.Equal(t, 175, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -582,12 +577,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "103", v)
|
||||
case "MAX_PROOF_DATA_BYTES":
|
||||
assert.Equal(t, "200", v)
|
||||
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
|
||||
assert.Equal(t, "201", v)
|
||||
case "MIN_PROOFS_REQUIRED":
|
||||
assert.Equal(t, "202", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
blobSchedule, ok := v.([]any)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
@@ -116,7 +116,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateSlot := update.AttestedHeader().Beacon().Slot
|
||||
@@ -132,15 +131,12 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
chunkLength = ssz.MarshalUint64(chunkLength, uint64(len(updateSSZ)+4))
|
||||
if _, err := w.Write(chunkLength); err != nil {
|
||||
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
|
||||
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateSSZ); err != nil {
|
||||
httputil.HandleError(w, "Could not write update SSZ: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -149,7 +145,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateJson, err := structs.LightClientUpdateFromConsensus(update)
|
||||
|
||||
@@ -132,7 +132,6 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.SyncChecker.Synced() && !optimistic {
|
||||
return
|
||||
|
||||
@@ -228,7 +228,7 @@ func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.
|
||||
}
|
||||
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get state for epoch's starting slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return nil, false
|
||||
}
|
||||
return st, true
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
@@ -77,7 +78,6 @@ go_test(
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
rpchelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
@@ -897,15 +898,20 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// For next epoch requests, we use the current epoch's state since committee
|
||||
// assignments for next epoch can be computed from current epoch's state.
|
||||
epochForState := requestedEpoch
|
||||
var startSlot primitives.Slot
|
||||
if requestedEpoch == nextEpoch {
|
||||
epochForState = currentEpoch
|
||||
startSlot, err = slots.EpochStart(currentEpoch)
|
||||
} else {
|
||||
startSlot, err = slots.EpochStart(requestedEpoch)
|
||||
}
|
||||
st, err := s.Stater.StateByEpoch(ctx, epochForState)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot from epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1014,11 +1020,39 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
nextEpochLookahead = true
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, requestedEpoch)
|
||||
epochStartSlot, err := slots.EpochStart(requestedEpoch)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot of epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var st state.BeaconState
|
||||
// if the requested epoch is new, use the head state and the next slot cache
|
||||
if requestedEpoch < currentEpoch {
|
||||
st, err = s.Stater.StateBySlot(ctx, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get state for slot %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
st, err = s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
|
||||
if st.Slot() < epochStartSlot {
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not process slots up to %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var assignments map[primitives.ValidatorIndex][]primitives.Slot
|
||||
if nextEpochLookahead {
|
||||
@@ -1069,8 +1103,7 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err = sortProposerDuties(duties); err != nil {
|
||||
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
|
||||
if !sortProposerDuties(w, duties) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1141,10 +1174,14 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
startingEpoch := min(requestedEpoch, currentEpoch)
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, startingEpoch)
|
||||
slot, err := slots.EpochStart(startingEpoch)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get sync committee slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err := s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1290,7 +1327,7 @@ func (s *Server) GetLiveness(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
st, err = s.Stater.StateBySlot(ctx, epochEnd)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get slot for requested epoch: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
participation, err = st.CurrentEpochParticipation()
|
||||
@@ -1410,20 +1447,22 @@ func syncCommitteeDutiesAndVals(
|
||||
return duties, vals, nil
|
||||
}
|
||||
|
||||
func sortProposerDuties(duties []*structs.ProposerDuty) error {
|
||||
var err error
|
||||
func sortProposerDuties(w http.ResponseWriter, duties []*structs.ProposerDuty) bool {
|
||||
ok := true
|
||||
sort.Slice(duties, func(i, j int) bool {
|
||||
si, parseErr := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
si, err := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
return false
|
||||
}
|
||||
sj, parseErr := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
sj, err := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
return false
|
||||
}
|
||||
return si < sj
|
||||
})
|
||||
return err
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
p2pmock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -2007,7 +2006,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -2186,7 +2184,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
}
|
||||
@@ -2227,62 +2224,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProposerDuties(t *testing.T) {
|
||||
@@ -2486,60 +2427,6 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
@@ -2570,7 +2457,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
|
||||
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, State: st}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2761,7 +2648,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
return newSyncPeriodSt
|
||||
}
|
||||
}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot, State: newSyncPeriodSt}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2842,7 +2729,8 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
slot, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
st2 := st.Copy()
|
||||
st2, err := util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st2.SetSlot(slot))
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
@@ -2856,7 +2744,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
State: st2,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st2},
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -2901,62 +2789,6 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
@@ -11,7 +11,6 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -99,7 +98,6 @@ type Stater interface {
|
||||
State(ctx context.Context, id []byte) (state.BeaconState, error)
|
||||
StateRoot(ctx context.Context, id []byte) ([]byte, error)
|
||||
StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
|
||||
StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
// BeaconDbStater is an implementation of Stater. It retrieves states from the beacon chain database.
|
||||
@@ -269,46 +267,6 @@ func (p *BeaconDbStater) StateBySlot(ctx context.Context, target primitives.Slot
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// StateByEpoch returns the state for the start of the requested epoch.
|
||||
// For current or next epoch, it uses the head state and next slot cache for efficiency.
|
||||
// For past epochs, it replays blocks from the most recent canonical state.
|
||||
func (p *BeaconDbStater) StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "statefetcher.StateByEpoch")
|
||||
defer span.End()
|
||||
|
||||
targetSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get epoch start slot")
|
||||
}
|
||||
|
||||
currentSlot := p.GenesisTimeFetcher.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// For past epochs, use the replay mechanism
|
||||
if epoch < currentEpoch {
|
||||
return p.StateBySlot(ctx, targetSlot)
|
||||
}
|
||||
|
||||
// For current or next epoch, use head state + next slot cache (much faster)
|
||||
headState, err := p.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
|
||||
// If head state is already at or past the target slot, return it
|
||||
if headState.Slot() >= targetSlot {
|
||||
return headState, nil
|
||||
}
|
||||
|
||||
// Process slots using the next slot cache
|
||||
headRoot := p.ChainInfoFetcher.CachedHeadRoot()
|
||||
st, err := transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot[:], targetSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", targetSlot)
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (p *BeaconDbStater) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -444,111 +444,3 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(101), st.Slot())
|
||||
}
|
||||
|
||||
func TestStateByEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
t.Run("current epoch uses head state", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 0
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
// Should return head state since it's already past epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("current epoch processes slots to epoch start", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 1
|
||||
// Current slot is 32 (epoch 1), so epoch 1 is current epoch
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := slotsPerEpoch // slot 32, epoch 1
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
// In real usage, the transition package handles this properly
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
// that can process slots (missing committees, etc.)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("past epoch uses replay", func(t *testing.T) {
|
||||
// Head is at epoch 2, requesting epoch 0 (past)
|
||||
headSlot := slotsPerEpoch * 2 // slot 64, epoch 2
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
pastEpochSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 0})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
mockReplayer := mockstategen.NewReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(pastEpochSt, 0)
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), st.Slot())
|
||||
})
|
||||
|
||||
t.Run("next epoch uses head state path", func(t *testing.T) {
|
||||
// Head is at slot 30 (epoch 0), requesting epoch 1 (next)
|
||||
// Current slot is 30 (epoch 0), so epoch 1 is next epoch
|
||||
headSlot := primitives.Slot(30)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("head state already at target slot returns immediately", func(t *testing.T) {
|
||||
// Head is at slot 32 (epoch 1 start), requesting epoch 1
|
||||
headSlot := slotsPerEpoch // slot 32
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("head state past target slot returns head state", func(t *testing.T) {
|
||||
// Head is at slot 40, requesting epoch 1 (starts at slot 32)
|
||||
headSlot := primitives.Slot(40)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
// Returns head state since it's already >= epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -52,27 +52,24 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
attCopy := att.Copy()
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
@@ -85,10 +82,6 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -105,17 +98,18 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
|
||||
singleAttCopy := singleAtt.Copy()
|
||||
att := singleAttCopy.ToAttestationElectra(committee)
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -38,7 +38,6 @@ func TestProposeAttestation(t *testing.T) {
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
TimeFetcher: chainService,
|
||||
AttestationStateFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
head := util.NewBeaconBlock()
|
||||
head.Block.Slot = 999
|
||||
@@ -142,7 +141,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
@@ -151,37 +149,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
func TestProposeAttestation_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
_, err := attesterServer.ProposeAttestation(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestProposeAttestationElectra_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := ðpb.SingleAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestGetAttestationData_OK(t *testing.T) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
|
||||
|
||||
@@ -26,6 +26,5 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// MockStater is a fake implementation of lookup.Stater.
|
||||
@@ -15,7 +14,6 @@ type MockStater struct {
|
||||
StateProviderFunc func(ctx context.Context, stateId []byte) (state.BeaconState, error)
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByEpoch map[primitives.Epoch]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
@@ -45,22 +43,3 @@ func (m *MockStater) StateRoot(context.Context, []byte) ([]byte, error) {
|
||||
func (m *MockStater) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
|
||||
return m.StatesBySlot[s], nil
|
||||
}
|
||||
|
||||
// StateByEpoch --
|
||||
func (m *MockStater) StateByEpoch(_ context.Context, e primitives.Epoch) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StatesByEpoch != nil {
|
||||
return m.StatesByEpoch[e], nil
|
||||
}
|
||||
// Fall back to StatesBySlot if StatesByEpoch is not set
|
||||
slot, err := slots.EpochStart(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.StatesBySlot != nil {
|
||||
return m.StatesBySlot[slot], nil
|
||||
}
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
438
beacon-chain/state/state-design.md
Normal file
@@ -0,0 +1,438 @@
|
||||
# Beacon Chain State Design Document
|
||||
|
||||
## Package Structure
|
||||
|
||||
### `/beacon-chain/state`
|
||||
|
||||
This is the top-level state package that defines interfaces only. This package provides a clean API boundary for state access without implementation details. The package follows the interface segregation principle, breaking down access patterns into granular read-only and write-only interfaces (e.g., `ReadOnlyValidators`, `WriteOnlyValidators`, `ReadOnlyBalances`, `WriteOnlyBalances`).
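
For illustration, here is a minimal sketch of the interface-segregation pattern using the balance interfaces named above; the exact method sets live in the real package and may differ from what is shown here.

```go
// Sketch only: consumers depend on the narrow capability they need
// instead of the full BeaconState interface.
type ReadOnlyBalances interface {
	Balances() []uint64
	BalanceAtIndex(idx primitives.ValidatorIndex) (uint64, error)
}

type WriteOnlyBalances interface {
	SetBalances(val []uint64) error
	UpdateBalancesAtIndex(idx primitives.ValidatorIndex, val uint64) error
}

// A rewards calculator, for example, can accept ReadOnlyBalances rather
// than the whole state, making its requirements explicit and testable.
func totalBalance(st ReadOnlyBalances) uint64 {
	var sum uint64
	for _, b := range st.Balances() {
		sum += b
	}
	return sum
}
```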
|
||||
|
||||
### `/beacon-chain/state/state-native`
|
||||
|
||||
This package contains the actual `BeaconState` struct and all concrete implementations of the state interfaces. It also contains thread-safe getters and setters.
|
||||
|
||||
### `/beacon-chain/state/fieldtrie`
|
||||
|
||||
A dedicated package for field-level merkle trie functionality. The main component of the package is the `FieldTrie` struct, which represents the merkle trie of a particular field. The package provides the field trie operations, such as recomputing, copying, and transferring tries.
|
||||
|
||||
### `/beacon-chain/state/stateutil`
|
||||
|
||||
A utility package whose main components are the state merkleization functions. It also contains field trie helpers, the shared-reference implementation, and validator tracking.
|
||||
|
||||
## Beacon State Architecture
|
||||
|
||||
### `BeaconState` Structure
|
||||
|
||||
The `BeaconState` is a single structure that supports all consensus versions. The value of the `version` field determines the active version of the state. It is used extensively throughout the codebase to determine which code path will be executed.
|
||||
|
||||
```go
|
||||
type BeaconState struct {
|
||||
version int
|
||||
id uint64 // Used for tracking states in multi-value slices
|
||||
|
||||
// Common fields (all versions)
|
||||
genesisTime uint64
|
||||
genesisValidatorsRoot [32]byte
|
||||
slot primitives.Slot
|
||||
// ...
|
||||
|
||||
// Phase0+ fields
|
||||
eth1Data *ethpb.Eth1Data
|
||||
eth1DataVotes []*ethpb.Eth1Data
|
||||
eth1DepositIndex uint64
|
||||
slashings []uint64
|
||||
previousEpochAttestations []*ethpb.PendingAttestation
|
||||
currentEpochAttestations []*ethpb.PendingAttestation
|
||||
// ...
|
||||
|
||||
// Altair+ fields
|
||||
previousEpochParticipation []byte
|
||||
currentEpochParticipation []byte
|
||||
currentSyncCommittee *ethpb.SyncCommittee
|
||||
nextSyncCommittee *ethpb.SyncCommittee
|
||||
// ...
|
||||
|
||||
// Fields for other forks...
|
||||
|
||||
// Internal state management
|
||||
lock sync.RWMutex
|
||||
dirtyFields map[types.FieldIndex]bool
|
||||
dirtyIndices map[types.FieldIndex][]uint64
|
||||
stateFieldLeaves map[types.FieldIndex]*fieldtrie.FieldTrie
|
||||
rebuildTrie map[types.FieldIndex]bool
|
||||
valMapHandler *stateutil.ValidatorMapHandler
|
||||
merkleLayers [][][]byte
|
||||
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
|
||||
}
|
||||
```
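
Because a single struct backs every fork, code throughout the beacon chain branches on the state version rather than on a concrete type. A minimal sketch of that pattern (the fork names are real, the branch bodies are placeholders):

```go
// Sketch: callers switch on the state version to pick the fork-specific
// code path, as described above.
switch st.Version() {
case version.Phase0:
	// pending-attestation based epoch processing
case version.Altair, version.Bellatrix, version.Capella:
	// participation-bits based epoch processing
default:
	// later forks
}
```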
|
||||
|
||||
### Multi-Value Slices
|
||||
|
||||
Several large array fields of the state are implemented as multi-value slices. This is a specialized data structure that enables efficient sharing and modification of slices between states. A multi-value slice is preferred over a regular slice in scenarios where only a small fraction of the array is updated at a time. In such cases, using a multi-value slice results in fewer memory allocations because many values of the slice will be shared between states, whereas with a regular slice changing even a single item results in copying the full slice. Examples include `MultiValueBlockRoots` and `MultiValueBalances`.
|
||||
|
||||
**Example**
|
||||
```go
|
||||
// Create new multi-value slice
|
||||
mvBalances := NewMultiValueBalances([]uint64{32000000000, 32000000000, ...})
|
||||
|
||||
// Share across states
|
||||
state1.balancesMultiValue = mvBalances
|
||||
state2 := state1.Copy() // state2 shares the same mvBalances
|
||||
|
||||
// Modify in state2
|
||||
state2.UpdateBalancesAtIndex(0, 31000000000) // This doesn't create a new multi-value slice.
|
||||
|
||||
state1.BalanceAtIndex(0) // this returns 32000000000
|
||||
state2.BalanceAtIndex(0) // this returns 31000000000
|
||||
```
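
The snippet below is a deliberately simplified, hypothetical model of the idea: per-state overrides layered over a shared base slice, keyed by the state `id` mentioned above. The real implementation is generic, reference-counted, and considerably more involved.

```go
// Hypothetical illustration only. Each state stores just the values it has
// overridden; every other read falls through to the shared base slice.
type miniMultiValue struct {
	base      []uint64
	overrides map[uint64]map[uint64]uint64 // state id -> index -> value
}

func (m *miniMultiValue) At(stateID, idx uint64) uint64 {
	if vals, ok := m.overrides[stateID]; ok {
		if v, ok := vals[idx]; ok {
			return v
		}
	}
	return m.base[idx]
}

func (m *miniMultiValue) UpdateAt(stateID, idx, val uint64) {
	// No copy of the base slice is made; only the single override is stored.
	if m.overrides[stateID] == nil {
		m.overrides[stateID] = make(map[uint64]uint64)
	}
	m.overrides[stateID][idx] = val
}
```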
|
||||
|
||||
### Getters/Setters
|
||||
|
||||
All beacon state getters and setters follow a consistent pattern: every exported method is protected against concurrent modification with a read lock (getters) or a write lock (setters).
|
||||
|
||||
**Getters**
|
||||
|
||||
Values are never returned directly from getters. A copy of the value is returned instead.
|
||||
|
||||
```go
|
||||
func (b *BeaconState) Fork() *ethpb.Fork {
|
||||
if b.fork == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.forkVal()
|
||||
}
|
||||
|
||||
func (b *BeaconState) forkVal() *ethpb.Fork {
|
||||
if b.fork == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
prevVersion := make([]byte, len(b.fork.PreviousVersion))
|
||||
copy(prevVersion, b.fork.PreviousVersion)
|
||||
currVersion := make([]byte, len(b.fork.CurrentVersion))
|
||||
copy(currVersion, b.fork.CurrentVersion)
|
||||
return ðpb.Fork{
|
||||
PreviousVersion: prevVersion,
|
||||
CurrentVersion: currVersion,
|
||||
Epoch: b.fork.Epoch,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
if b.version != version.Phase0 {
|
||||
return nil, errNotSupported("CurrentEpochAttestations", b.version)
|
||||
}
|
||||
|
||||
if b.currentEpochAttestations == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.currentEpochAttestationsVal(), nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) currentEpochAttestationsVal() []*ethpb.PendingAttestation {
|
||||
if b.currentEpochAttestations == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
res := make([]*ethpb.PendingAttestation, len(b.currentEpochAttestations))
|
||||
for i := range res {
|
||||
res[i] = b.currentEpochAttestations[i].Copy()
|
||||
}
|
||||
return res
|
||||
}
|
||||
```
|
||||
|
||||
In the case of fields backed by multi-value slices, the appropriate methods of the multi-value slice are invoked.
|
||||
|
||||
```go
|
||||
func (b *BeaconState) StateRoots() [][]byte {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
roots := b.stateRootsVal()
|
||||
if roots == nil {
|
||||
return nil
|
||||
}
|
||||
return roots.Slice()
|
||||
}
|
||||
|
||||
func (b *BeaconState) stateRootsVal() customtypes.StateRoots {
|
||||
if b.stateRootsMultiValue == nil {
|
||||
return nil
|
||||
}
|
||||
return b.stateRootsMultiValue.Value(b)
|
||||
}
|
||||
|
||||
func (b *BeaconState) StateRootAtIndex(idx uint64) ([]byte, error) {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
if b.stateRootsMultiValue == nil {
|
||||
return nil, nil
|
||||
}
|
||||
r, err := b.stateRootsMultiValue.At(b, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r[:], nil
|
||||
}
|
||||
```
|
||||
|
||||
**Setters**
|
||||
|
||||
Whenever a beacon state field is set, it is marked as dirty. This is needed for hash tree root computation so that the cached merkle branch with the old value of the modified field is recomputed using the new value.
|
||||
|
||||
```go
|
||||
func (b *BeaconState) SetSlot(val primitives.Slot) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.slot = val
|
||||
b.markFieldAsDirty(types.Slot)
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
Several fields of the state are shared between states through a `Reference` mechanism. These references are stored in `b.sharedFieldReferences`. Whenever a state is copied, the reference counter for each of these fields is incremented. When a new value for any of these fields is set, the counter for the existing reference is decremented and a new reference is created for that field.
|
||||
|
||||
```go
|
||||
type Reference struct {
|
||||
refs uint // Reference counter
|
||||
lock sync.RWMutex
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
func (b *BeaconState) SetCurrentParticipationBits(val []byte) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version == version.Phase0 {
|
||||
return errNotSupported("SetCurrentParticipationBits", b.version)
|
||||
}
|
||||
|
||||
b.sharedFieldReferences[types.CurrentEpochParticipationBits].MinusRef()
|
||||
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
|
||||
|
||||
b.currentEpochParticipation = val
|
||||
b.markFieldAsDirty(types.CurrentEpochParticipationBits)
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
Updating a single value of an array field requires updating `b.dirtyIndices` to ensure the field trie for that particular field is properly recomputed.
|
||||
|
||||
```go
|
||||
func (b *BeaconState) UpdateBalancesAtIndex(idx primitives.ValidatorIndex, val uint64) error {
|
||||
if err := b.balancesMultiValue.UpdateAt(b, uint64(idx), val); err != nil {
|
||||
return errors.Wrap(err, "could not update balances")
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.markFieldAsDirty(types.Balances)
|
||||
b.addDirtyIndices(types.Balances, []uint64{uint64(idx)})
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
As is the case with getters, for fields backed by multi-value slices the appropriate methods of the multi-value slice are invoked when updating field values. The multi-value slice keeps track of states internally, which means the `Reference` construct is unnecessary.
|
||||
|
||||
```go
|
||||
func (b *BeaconState) SetStateRoots(val [][]byte) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.stateRootsMultiValue != nil {
|
||||
b.stateRootsMultiValue.Detach(b)
|
||||
}
|
||||
b.stateRootsMultiValue = NewMultiValueStateRoots(val)
|
||||
|
||||
b.markFieldAsDirty(types.StateRoots)
|
||||
b.rebuildTrie[types.StateRoots] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error {
|
||||
if err := b.stateRootsMultiValue.UpdateAt(b, idx, stateRoot); err != nil {
|
||||
return errors.Wrap(err, "could not update state roots")
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.markFieldAsDirty(types.StateRoots)
|
||||
b.addDirtyIndices(types.StateRoots, []uint64{idx})
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Read-Only Validator
|
||||
|
||||
There are two ways in which validators can be accessed through getters. One approach is to use the methods `Validators` and `ValidatorAtIndex`, which return a copy of, respectively, the whole validator set or a particular validator. The other approach is to use the `ReadOnlyValidator` construct via the `ValidatorsReadOnly` and `ValidatorAtIndexReadOnly` methods. The `ReadOnlyValidator` structure is a wrapper around the protobuf validator. The advantage of using the read-only version is that no copy of the underlying validator is made, which helps with performance, especially when accessing a large number of validators (e.g. looping through the whole validator registry). Because the read-only wrapper exposes only getters, each of which returns a copy of the validator’s fields, it prevents accidental mutation of the underlying validator.
|
||||
|
||||
```go
|
||||
type ReadOnlyValidator struct {
|
||||
validator *ethpb.Validator
|
||||
}
|
||||
|
||||
// Only getter methods, no setters
|
||||
func (v *ReadOnlyValidator) PublicKey() [48]byte
|
||||
func (v *ReadOnlyValidator) EffectiveBalance() uint64
|
||||
func (v *ReadOnlyValidator) Slashed() bool
|
||||
// ... etc
|
||||
```
|
||||
|
||||
**Usage**
|
||||
```go
|
||||
// Returns ReadOnlyValidator
|
||||
validator, err := state.ValidatorAtIndexReadOnly(idx)
|
||||
|
||||
// Can read but not modify
|
||||
pubkey := validator.PublicKey()
|
||||
balance := validator.EffectiveBalance()
|
||||
|
||||
// To modify, must get mutable copy
|
||||
validatorCopy := validator.Copy() // Returns *ethpb.Validator
|
||||
validatorCopy.EffectiveBalance = newBalance
|
||||
state.UpdateValidatorAtIndex(idx, validatorCopy)
|
||||
```
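
A short sketch of the performance point above, assuming `ValidatorsReadOnly()` returns a plain slice of read-only wrappers (the signature is assumed here for illustration):

```go
// Sketch: iterate the whole registry without copying each validator.
// Unlike st.Validators(), no per-validator copy is allocated here.
var activeBalance uint64
for _, val := range st.ValidatorsReadOnly() {
	if !val.Slashed() {
		activeBalance += val.EffectiveBalance()
	}
}
```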
|
||||
|
||||
## Field Trie System
|
||||
|
||||
For a few large arrays, such as block roots or the validator registry, hashing the state field at every slot would be very expensive. To avoid such re-hashing, the underlying merkle trie of the field is maintained and only the branch(es) corresponding to the changed index(es) are recomputed, instead of the whole trie.
|
||||
|
||||
Each state version has a list of active fields defined (e.g. `phase0Fields`, `altairFields`), which serves as the basis for field trie creation.
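
A hedged sketch of what such a list looks like; the field names follow the `types.FieldIndex` constants used elsewhere in this document, and the real lists are longer:

```go
// Sketch only: each fork declares the set of state fields it contains,
// and a field trie is created for exactly those fields.
var phase0Fields = []types.FieldIndex{
	types.GenesisTime,
	types.Slot,
	types.BlockRoots,
	types.StateRoots,
	types.Validators,
	types.Balances,
	// ...
}

// altairFields, bellatrixFields, etc. are defined the same way; later
// forks both add fields (e.g. participation bits) and drop others
// (e.g. pending attestations).
```

The `FieldTrie` struct that is created for each of these fields is shown below.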
|
||||
|
||||
```go
|
||||
type FieldTrie struct {
|
||||
*sync.RWMutex
|
||||
reference *stateutil.Reference // The number of states this field trie is shared between
|
||||
fieldLayers [][]*[32]byte // Merkle trie layers
|
||||
field types.FieldIndex // Which field this field trie represents
|
||||
dataType types.DataType // Type of field's array
|
||||
length uint64 // Maximum number of elements
|
||||
numOfElems int // Number of elements
|
||||
isTransferred bool // Whether trie was transferred
|
||||
}
|
||||
```
|
||||
|
||||
The `DataType` enum indicates the type of the field's array. The possible values are:
|
||||
- `BasicArray`: Fixed-size arrays (e.g. `blockRoots`)
|
||||
- `CompositeArray`: Variable-size arrays (e.g. `validators`)
|
||||
- `CompressedArray`: Variable-size arrays that pack multiple elements per trie leaf (e.g. `balances`, with 4 elements per leaf; see the packing sketch below)
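
As a concrete illustration of the `CompressedArray` packing, four little-endian `uint64` balances fit exactly into one 32-byte leaf, mirroring how SSZ chunks lists of basic types. The helper below is illustrative only (it uses `encoding/binary`), not the actual stateutil code:

```go
// Illustrative only: pack four uint64 balances into a single 32-byte
// trie leaf, the way SSZ chunks lists of basic types (4 * 8 bytes = 32).
func packBalancesLeaf(balances [4]uint64) [32]byte {
	var leaf [32]byte
	for i, b := range balances {
		binary.LittleEndian.PutUint64(leaf[i*8:(i+1)*8], b)
	}
	return leaf
}
```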
|
||||
|
||||
### Recomputing a Trie
|
||||
|
||||
To avoid recomputing the state root every time any value of the state changes, branches of field tries are not recomputed until the state root is actually needed. When it is necessary to recompute a trie, the `RecomputeTrie` function rebuilds the affected branches in the trie according to the provided changed indices. The changed indices of each field are tracked in the `dirtyIndices` field of the beacon state, which is a `map[types.FieldIndex][]uint64`. The recomputation algorithms for fixed-size and variable-size fields are different.
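
At its core, the recomputation is a standard incremental merkle update: rehash the changed leaf, then walk its path up to the root. A minimal, self-contained sketch of that idea is shown below; it assumes fully populated power-of-two layers and uses `crypto/sha256`, whereas the actual `RecomputeTrie` also handles variable-size and packed fields.

```go
// Sketch: update one leaf in pre-built trie layers and rehash only the
// branch above it. layers[0] holds the leaves, layers[len(layers)-1][0]
// is the root. Layers are assumed to be full (power-of-two width).
func recomputeBranch(layers [][][32]byte, leafIdx uint64, newLeaf [32]byte) {
	layers[0][leafIdx] = newLeaf
	idx := leafIdx
	for depth := 0; depth < len(layers)-1; depth++ {
		sibling := idx ^ 1
		var pair [64]byte
		if idx%2 == 0 {
			copy(pair[:32], layers[depth][idx][:])
			copy(pair[32:], layers[depth][sibling][:])
		} else {
			copy(pair[:32], layers[depth][sibling][:])
			copy(pair[32:], layers[depth][idx][:])
		}
		idx /= 2
		layers[depth+1][idx] = sha256.Sum256(pair[:])
	}
}
```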
|
||||
|
||||
### Transferring a Trie
|
||||
|
||||
When it is expected that an older state won't need its trie for recomputation, its trie layers can be transferred directly to a new trie instead of copying them:
|
||||
|
||||
```go
|
||||
func (f *FieldTrie) TransferTrie() *FieldTrie {
|
||||
f.isTransferred = true
|
||||
nTrie := &FieldTrie{
|
||||
fieldLayers: f.fieldLayers, // Direct transfer, no copy
|
||||
field: f.field,
|
||||
dataType: f.dataType,
|
||||
reference: stateutil.NewRef(1),
|
||||
// ...
|
||||
}
|
||||
f.fieldLayers = nil // Zero out original
|
||||
return nTrie
|
||||
}
|
||||
```
|
||||
|
||||
The downside of this approach is that if the older state's trie is needed again later, the whole trie has to be recreated from scratch, since the original is now empty. This is especially costly for the validator registry, which is why its trie is always copied instead:
|
||||
|
||||
```go
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
var newTrie *fieldtrie.FieldTrie
|
||||
// We choose to only copy the validator
|
||||
// trie as it is pretty expensive to regenerate.
|
||||
if index == types.Validators {
|
||||
newTrie = fTrie.CopyTrie()
|
||||
} else {
|
||||
newTrie = fTrie.TransferTrie()
|
||||
}
|
||||
fTrie.FieldReference().MinusRef()
|
||||
b.stateFieldLeaves[index] = newTrie
|
||||
fTrie = newTrie
|
||||
}
|
||||
```
|
||||
|
||||
## Hash Tree Root Computation
|
||||
|
||||
The `BeaconState` structure is not a protobuf object, so there are no SSZ-generated methods for its fields. Instead, each state field has a custom method that returns its root. One advantage of this approach is that each root computation can be implemented as efficiently as possible. For example, when computing the root of the validator registry, validators are hashed in parallel and vectorized sha256 is used to speed up the computation.
|
||||
|
||||
```go
|
||||
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
|
||||
// Exit early if no validators are provided.
|
||||
if len(validators) == 0 {
|
||||
return [][32]byte{}, nil
|
||||
}
|
||||
wg := sync.WaitGroup{}
|
||||
n := runtime.GOMAXPROCS(0)
|
||||
rootsSize := len(validators) * validatorFieldRoots
|
||||
groupSize := len(validators) / n
|
||||
roots := make([][32]byte, rootsSize)
|
||||
wg.Add(n - 1)
|
||||
for j := 0; j < n-1; j++ {
|
||||
go hashValidatorHelper(validators, roots, j, groupSize, &wg)
|
||||
}
|
||||
for i := (n - 1) * groupSize; i < len(validators); i++ {
|
||||
fRoots, err := ValidatorFieldRoots(validators[i])
|
||||
if err != nil {
|
||||
return [][32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
|
||||
}
|
||||
for k, root := range fRoots {
|
||||
roots[i*validatorFieldRoots+k] = root
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// A validator's tree can be represented with a depth of 3, as log2(8) = 3.
// Using this property we can lay out all the individual fields of a
// validator and hash them in a single level using our vectorized routine.
|
||||
for range validatorTreeDepth {
|
||||
// Overwrite input lists as we are hashing by level
|
||||
// and only need the highest level to proceed.
|
||||
roots = htr.VectorizedSha256(roots)
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
```
|
||||
|
||||
Calculating the full state root is very expensive; therefore, it is done in a lazy fashion. Previously generated merkle layers are cached in the state and only merkle trie branches corresponding to dirty indices are regenerated before returning the final root.
|
||||
|
||||
```go
|
||||
func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beaconState.HashTreeRoot")
|
||||
defer span.End()
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
// When len(b.merkleLayers) > 0, this function returns immediately
|
||||
if err := b.initializeMerkleLayers(ctx); err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
// Dirty fields are tracked in the beacon state through b.dirtyFields
|
||||
if err := b.recomputeDirtyFields(ctx); err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
|
||||
}
|
||||
```
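
A usage sketch of this lazy behavior, assuming a state `st` and context `ctx` obtained elsewhere: the first call pays for full merkleization, while a later call after a single balance update only rehashes the dirty branch.

```go
// Sketch: repeated HashTreeRoot calls reuse the cached merkle layers.
root1, err := st.HashTreeRoot(ctx) // full merkleization on the first call
if err != nil {
	return err
}

if err := st.UpdateBalancesAtIndex(0, 31_000_000_000); err != nil {
	return err
}

root2, err := st.HashTreeRoot(ctx) // only the balances branch is rehashed
if err != nil {
	return err
}
_ = root1
_ = root2
```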
|
||||
@@ -14,7 +14,6 @@ go_library(
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"exec_proofs.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"log.go",
|
||||
@@ -32,7 +31,6 @@ go_library(
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_execution_proofs_by_root_topic.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_light_client.go",
|
||||
"rpc_metadata.go",
|
||||
@@ -48,7 +46,6 @@ go_library(
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_execution_proofs.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
@@ -60,7 +57,6 @@ go_library(
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
"validate_data_column.go",
|
||||
"validate_execution_proof.go",
|
||||
"validate_light_client.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
@@ -97,7 +93,6 @@ go_library(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
@@ -192,7 +187,6 @@ go_test(
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_data_column_sidecars_by_range_test.go",
|
||||
"rpc_data_column_sidecars_by_root_test.go",
|
||||
"rpc_execution_proofs_by_root_topic_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
"rpc_light_client_test.go",
|
||||
@@ -217,7 +211,6 @@ go_test(
|
||||
"validate_blob_test.go",
|
||||
"validate_bls_to_execution_change_test.go",
|
||||
"validate_data_column_test.go",
|
||||
"validate_execution_proof_test.go",
|
||||
"validate_light_client_test.go",
|
||||
"validate_proposer_slashing_test.go",
|
||||
"validate_sync_committee_message_test.go",
|
||||
@@ -251,7 +244,6 @@ go_test(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/slashings/mock:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
|
||||
@@ -161,17 +161,13 @@ func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error, 1)
|
||||
resChan := make(chan error)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
s.kzgChan <- verificationSet
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case s.kzgChan <- verificationSet:
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||
|
||||
@@ -3,7 +3,6 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -244,10 +243,8 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
var lastRoot [fieldparams.RootLength]byte
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
lastRoot = root
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
@@ -262,7 +259,6 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingRootCount := len(missingIndicesByRoot)
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
@@ -305,19 +301,11 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingRootCount": initialMissingRootCount,
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingRootCount": len(missingIndicesByRoot),
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
})
|
||||
|
||||
if initialMissingRootCount == 1 {
|
||||
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
|
||||
}
|
||||
|
||||
log.Debug("Requested direct data column sidecars from peers")
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// generateExecProof returns a dummy execution proof after the specified delay.
|
||||
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
|
||||
// Simulate proof generation work
|
||||
time.Sleep(delay)
|
||||
|
||||
// Create a dummy proof with some deterministic data
|
||||
block := roBlock.Block()
|
||||
if block == nil {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return nil, errors.New("nil block body")
|
||||
}
|
||||
|
||||
executionData, err := body.Execution()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("execution: %w", err)
|
||||
}
|
||||
|
||||
if executionData == nil {
|
||||
return nil, errors.New("nil execution data")
|
||||
}
|
||||
|
||||
hash, err := executionData.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("hash tree root: %w", err)
|
||||
}
|
||||
|
||||
proofData := []byte{
|
||||
0xFF, // Magic byte for dummy proof
|
||||
byte(proofID),
|
||||
// Include some payload hash bytes
|
||||
hash[0],
|
||||
hash[1],
|
||||
hash[2],
|
||||
hash[3],
|
||||
}
|
||||
|
||||
blockRoot := roBlock.Root()
|
||||
|
||||
proof := ðpb.ExecutionProof{
|
||||
ProofId: proofID,
|
||||
Slot: block.Slot(),
|
||||
BlockHash: hash[:],
|
||||
BlockRoot: blockRoot[:],
|
||||
ProofData: proofData,
|
||||
}
|
||||
|
||||
return proof, nil
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
@@ -269,71 +268,6 @@ func TestKzgBatchVerifierFallback(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SecondsPerSlot = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier blocked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
|
||||
cancelledCtx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-service.kzgChan:
|
||||
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
|
||||
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
|
||||
if len(roSidecars) >= count {
|
||||
|
||||
@@ -204,13 +204,6 @@ var (
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnsRecoveredFromELAttempts = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "data_columns_recovered_from_el_attempts",
|
||||
Help: "Count the number of data columns recovery attempts from the execution layer.",
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnsRecoveredFromELTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "data_columns_recovered_from_el_total",
|
||||
@@ -249,13 +242,6 @@ var (
|
||||
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnSidecarsObtainedViaELCount = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "data_column_obtained_via_el_count",
|
||||
Help: "Count the number of data column sidecars obtained via the execution layer.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -89,13 +88,6 @@ func WithBlsToExecPool(blsToExecPool blstoexec.PoolManager) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func WithExecProofPool(execProofPool execproofs.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.execProofPool = execProofPool
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithChainService(chain blockchainService) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.chain = chain
|
||||
|
||||
@@ -3,9 +3,9 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
@@ -21,23 +21,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const pendingAttsLimit = 32768
|
||||
|
||||
// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
|
||||
type aggregatorIndexFilter int
|
||||
|
||||
const (
|
||||
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
|
||||
ignoreAggregatorIndex aggregatorIndexFilter = iota
|
||||
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
|
||||
includeAggregatorIndex
|
||||
)
|
||||
var pendingAttsLimit = 32768
|
||||
|
||||
// This method processes pending attestations as a "known" block as arrived. With validations,
|
||||
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
|
||||
@@ -60,7 +50,16 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
attestations := s.blkRootToPendingAtts[bRoot]
|
||||
s.pendingAttsLock.RUnlock()
|
||||
|
||||
s.processAttestations(ctx, attestations)
|
||||
if len(attestations) > 0 {
|
||||
start := time.Now()
|
||||
s.processAttestations(ctx, attestations)
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
"duration": duration,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
randGen := rand.NewGenerator()
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
@@ -80,71 +79,26 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
// processAttestations processes a list of attestations.
|
||||
// It assumes (for logging purposes only) that all attestations pertain to the same block.
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
|
||||
if len(attestations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
firstAttestation := attestations[0]
|
||||
var blockRoot []byte
|
||||
switch v := firstAttestation.(type) {
|
||||
case ethpb.Att:
|
||||
blockRoot = v.GetData().BeaconBlockRoot
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping processing", v)
|
||||
return
|
||||
}
|
||||
|
||||
validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
|
||||
startAggregate := time.Now()
|
||||
atts := make([]ethpb.Att, 0, len(attestations))
|
||||
aggregateAttAndProofCount := 0
|
||||
for _, att := range attestations {
|
||||
switch v := att.(type) {
|
||||
case ethpb.Att:
|
||||
atts = append(atts, v)
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
aggregateAttAndProofCount++
|
||||
// Avoid processing multiple aggregates only differing by aggregator index.
|
||||
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
|
||||
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
|
||||
}) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.processAggregate(ctx, v); err != nil {
|
||||
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
|
||||
continue
|
||||
}
|
||||
|
||||
validAggregates = append(validAggregates, v)
|
||||
s.processAggregate(ctx, v)
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping", v)
|
||||
}
|
||||
}
|
||||
durationAggregateAttAndProof := time.Since(startAggregate)
|
||||
|
||||
startAtts := time.Now()
|
||||
for _, bucket := range bucketAttestationsByData(atts) {
|
||||
s.processAttestationBucket(ctx, bucket)
|
||||
}
|
||||
|
||||
durationAtts := time.Since(startAtts)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
"totalCount": len(attestations),
|
||||
"aggregateAttAndProofCount": aggregateAttAndProofCount,
|
||||
"uniqueAggregateAttAndProofCount": len(validAggregates),
|
||||
"attCount": len(atts),
|
||||
"durationTotal": durationAggregateAttAndProof + durationAtts,
|
||||
"durationAggregateAttAndProof": durationAggregateAttAndProof,
|
||||
"durationAtts": durationAtts,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
// attestationBucket groups attestations with the same AttestationData for batch processing.
|
||||
@@ -349,20 +303,21 @@ func (s *Service) processVerifiedAttestation(
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
|
||||
res, err := s.validateAggregatedAtt(ctx, aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending aggregated attestation failed validation")
|
||||
return errors.Wrap(err, "validate aggregated att")
|
||||
return
|
||||
}
|
||||
|
||||
if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
|
||||
return errors.New("Pending aggregated attestation failed validation")
|
||||
log.Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
}
|
||||
|
||||
att := aggregate.AggregateAttestationAndProof().AggregateVal()
|
||||
if err := s.saveAttestation(att); err != nil {
|
||||
return errors.Wrap(err, "save attestation")
|
||||
log.WithError(err).Debug("Could not save aggregated attestation")
|
||||
return
|
||||
}
|
||||
|
||||
_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
@@ -370,8 +325,6 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
|
||||
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast aggregated attestation")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This defines how pending aggregates are saved in the map. The key is the
|
||||
@@ -383,7 +336,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
|
||||
|
||||
s.savePending(root, agg, func(other any) bool {
|
||||
a, ok := other.(ethpb.SignedAggregateAttAndProof)
|
||||
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
|
||||
return ok && pendingAggregatesAreEqual(agg, a)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -438,19 +391,13 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
|
||||
}
|
||||
|
||||
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
|
||||
// The filter parameter controls whether aggregator index is considered in the equality check.
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
|
||||
if a.Version() != b.Version() {
|
||||
return false
|
||||
}
|
||||
|
||||
if filter == includeAggregatorIndex {
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
}
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
}
|
||||
|
||||
aAtt := a.AggregateAttestationAndProof().AggregateVal()
|
||||
bAtt := b.AggregateAttestationAndProof().AggregateVal()
|
||||
if aAtt.GetData().Slot != bAtt.GetData().Slot {
|
||||
|
||||
@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
// Process block A (which exists and has no pending attestations)
|
||||
// This should skip processing attestations for A and request blocks B and C
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
|
||||
require.LogsContain(t, hook, "Requesting blocks by root")
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
}
|
||||
|
||||
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
|
||||
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different version", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProofElectra{Message: ðpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different slot", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different committee index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different aggregation bits", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -984,30 +984,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1000},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 1,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package sync

import (
	"context"
	"encoding/hex"
	"fmt"
	"slices"
	"sync"
@@ -43,13 +44,11 @@ func (s *Service) processPendingBlocksQueue() {
		if !s.chainIsStarted() {
			return
		}

		locker.Lock()
		defer locker.Unlock()

		if err := s.processPendingBlocks(s.ctx); err != nil {
			log.WithError(err).Debug("Could not process pending blocks")
		}
		locker.Unlock()
	})
}

@@ -74,10 +73,8 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
	randGen := rand.NewGenerator()
	var parentRoots [][32]byte

	blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)

	// Iterate through sorted slots.
	for i, slot := range sortedSlots {
	for _, slot := range sortedSlots {
		// Skip processing if slot is in the future.
		if slot > s.cfg.clock.CurrentSlot() {
			continue
@@ -94,9 +91,6 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {

		// Process each block in the queue.
		for _, b := range blocksInCache {
			start := time.Now()
			totalDuration := time.Duration(0)

			if err := blocks.BeaconBlockIsNil(b); err != nil {
				continue
			}
@@ -153,34 +147,19 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
			}
			cancelFunction()

			blkRoots = append(blkRoots, blkRoot)
			// Process pending attestations for this block.
			if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
				log.WithError(err).Debug("Failed to process pending attestations for block")
			}

			// Remove the processed block from the queue.
			if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
				return err
			}

			duration := time.Since(start)
			totalDuration += duration
			log.WithFields(logrus.Fields{
				"slotIndex":     fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
				"slot":          slot,
				"root":          fmt.Sprintf("%#x", blkRoot),
				"duration":      duration,
				"totalDuration": totalDuration,
			}).Debug("Processed pending block and cleared it in cache")
			log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
		}

		span.End()
	}

	for _, blkRoot := range blkRoots {
		// Process pending attestations for this block.
		if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
			log.WithError(err).Debug("Failed to process pending attestations for block")
		}
	}

	return s.sendBatchRootRequest(ctx, parentRoots, randGen)
}

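The reordering above moves pending-attestation processing out of the per-block loop: block roots are collected while blocks are processed and dequeued, and attestations are handled in a second pass. A rough sketch of that two-phase structure, with simplified stand-in types and illustrative helper names:

package main

import "fmt"

type block struct{ root [32]byte }

func processBlock(b block) error { return nil }

func processPendingAttsFor(root [32]byte) { fmt.Printf("pending atts for %x\n", root[:4]) }

// processQueue mirrors the two-phase structure of processPendingBlocks:
// phase 1 processes and dequeues blocks, phase 2 drains pending attestations.
func processQueue(queue []block) {
	roots := make([][32]byte, 0, len(queue))
	for _, b := range queue {
		if err := processBlock(b); err != nil {
			continue
		}
		roots = append(roots, b.root) // remember the root, do not touch attestations yet
	}
	for _, r := range roots {
		processPendingAttsFor(r) // attestations handled after every block in the batch
	}
}

func main() {
	processQueue([]block{{root: [32]byte{0xaa}}, {root: [32]byte{0xbb}}})
}
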
@@ -259,10 +238,6 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
		return errors.Wrap(err, "request and save missing data column sidecars")
	}

	if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
		return errors.Wrap(err, "request and save missing execution proofs")
	}

	return nil
}

@@ -404,19 +379,6 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
		req = roots[:maxReqBlock]
	}

	if logrus.GetLevel() >= logrus.DebugLevel {
		rootsStr := make([]string, 0, len(roots))
		for _, req := range roots {
			rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
		}

		log.WithFields(logrus.Fields{
			"peer":  pid,
			"count": len(req),
			"roots": rootsStr,
		}).Debug("Requesting blocks by root")
	}

	// Send the request to the peer.
	if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
		tracing.AnnotateError(span, err)
@@ -476,6 +438,8 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
			roots = append(roots[:i], roots[i+1:]...)
			continue
		}

		log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
	}
	return roots
}

@@ -100,10 +100,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
	// DataColumnSidecarsByRangeV1
	topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars

	executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
	// ExecutionProofsByRootV1
	topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs

	// General topic for all rpc requests.
	topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

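The new ExecutionProofsByRootV1 topic gets its own leaky-bucket collector above. A toy sketch of the per-topic registration idea; this is not the real leakybucket collector API Prysm uses, and the topic keys and limits below are illustrative:

package main

import "fmt"

// collector is a toy stand-in for the per-topic rate-limit collector;
// only the burst bookkeeping is sketched here.
type collector struct {
	perSecond int64
	burst     int64
	used      int64
}

func (c *collector) allow(n int64) bool {
	if c.used+n > c.burst {
		return false
	}
	c.used += n
	return true
}

func main() {
	// One collector per topic family, mirroring newRateLimiter above
	// (keys shortened for the sketch).
	topicMap := map[string]*collector{
		"dataColumnSidecarsByRoot": {perSecond: 1, burst: 64},
		"executionProofsByRoot":    {perSecond: 1, burst: 64},
	}
	c := topicMap["executionProofsByRoot"]
	fmt.Println(c.allow(1), c.allow(100)) // true false
}
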
@@ -17,7 +17,7 @@ import (

func TestNewRateLimiter(t *testing.T) {
	rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
	assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
	assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
}

func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

@@ -51,7 +51,6 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
		p2p.RPCBlobSidecarsByRangeTopicV1:        s.blobSidecarsByRangeRPCHandler,       // Modified in Fulu
		p2p.RPCDataColumnSidecarsByRootTopicV1:   s.dataColumnSidecarByRootRPCHandler,   // Added in Fulu
		p2p.RPCDataColumnSidecarsByRangeTopicV1:  s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
		p2p.RPCExecutionProofsByRootTopicV1:      s.executionProofsByRootRPCHandler,     // Added in Fulu
	}, nil
}

@@ -11,13 +11,11 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -89,77 +87,9 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
		return errors.Wrap(err, "request and save missing data columns")
	}

	if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
		return errors.Wrap(err, "request and save missing execution proofs")
	}

	return err
}

func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
	if len(blks) == 0 {
		return nil
	}

	// TODO: Parallelize requests for multiple blocks.
	for _, blk := range blks {
		if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
			return err
		}
	}
	return nil
}

func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
	if !features.Get().EnableZkvm {
		return nil
	}

	// Check proof retention period.
	blockEpoch := slots.ToEpoch(block.Block().Slot())
	currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
	if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
		return nil
	}

	// Check how many proofs are needed with Execution Proof Pool.
	storedIds := s.cfg.execProofPool.Ids(block.Root())
	count := uint64(len(storedIds))
	if count >= params.BeaconConfig().MinProofsRequired {
		return nil
	}

	// Construct request
	blockRoot := block.Root()
	req := &ethpb.ExecutionProofsByRootRequest{
		BlockRoot:   blockRoot[:],
		CountNeeded: params.BeaconConfig().MinProofsRequired - count,
		AlreadyHave: storedIds,
	}

	// Call SendExecutionProofByRootRequest
	zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
	if len(zkvmEnabledPeers) == 0 {
		return fmt.Errorf("no zkVM enabled peers available to request execution proofs")
	}

	// TODO: For simplicity, just pick the first peer for now.
	// In the future, we can implement better peer selection logic.
	pid := zkvmEnabledPeers[0]
	proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
	if err != nil {
		return fmt.Errorf("send execution proofs by root request: %w", err)
	}

	// Insert ExecProofPool
	// TODO: Implement multiple proof insertion in ExecProofPool to avoid multiple locks.
	for _, proof := range proofs {
		s.cfg.execProofPool.Insert(proof)
	}

	return nil
}

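sendAndSaveExecutionProofs above only requests the gap between what the pool already holds and MinProofsRequired, and tells the peer which proof ids it already has. A small sketch of that bookkeeping with simplified stand-in types (field names are illustrative, not the generated ethpb struct):

package main

import "fmt"

// proofRequest is a simplified stand-in for ethpb.ExecutionProofsByRootRequest.
type proofRequest struct {
	blockRoot   [32]byte
	countNeeded uint64
	alreadyHave []uint64
}

// buildProofRequest mirrors the bookkeeping in sendAndSaveExecutionProofs:
// request only the gap between what the pool holds and the minimum required.
func buildProofRequest(root [32]byte, storedIds []uint64, minRequired uint64) (proofRequest, bool) {
	have := uint64(len(storedIds))
	if have >= minRequired {
		return proofRequest{}, false // nothing to fetch
	}
	return proofRequest{
		blockRoot:   root,
		countNeeded: minRequired - have,
		alreadyHave: storedIds,
	}, true
}

func main() {
	req, ok := buildProofRequest([32]byte{0x01}, []uint64{1}, 3)
	fmt.Println(ok, req.countNeeded) // true 2
}
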
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

@@ -182,21 +182,3 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor

	return nil
}

func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
	// Success response code.
	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
		return errors.Wrap(err, "stream write")
	}
	ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
	if err := writeContextToStream(ctxBytes[:], stream); err != nil {
		return errors.Wrap(err, "write context to stream")
	}

	// Execution proof.
	if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
		return errors.Wrap(err, "encode with max length")
	}

	return nil
}

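WriteExecutionProofChunk frames each proof as a result byte, the fork-digest context bytes, then the encoded payload. A toy illustration of that chunk layout, with raw bytes standing in for the real SSZ-snappy encoding and its length prefix:

package main

import (
	"bytes"
	"fmt"
)

// writeChunk sketches the per-chunk layout used by WriteExecutionProofChunk:
// one result byte, then the fork-digest context bytes, then the encoded payload.
func writeChunk(w *bytes.Buffer, forkDigest [4]byte, payload []byte) {
	w.WriteByte(0x00)      // success response code
	w.Write(forkDigest[:]) // context bytes identifying the fork
	w.Write(payload)       // encoded execution proof (SSZ + snappy in the real code)
}

func main() {
	var buf bytes.Buffer
	writeChunk(&buf, [4]byte{0xde, 0xad, 0xbe, 0xef}, []byte("proof-bytes"))
	fmt.Printf("% x\n", buf.Bytes())
}
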
@@ -1,219 +0,0 @@
package sync

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	libp2pcore "github.com/libp2p/go-libp2p/core"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/sirupsen/logrus"
)

// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
func SendExecutionProofsByRootRequest(
	ctx context.Context,
	clock blockchain.TemporalOracle,
	p2pProvider p2p.P2P,
	pid peer.ID,
	req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
	// Validate request
	if req.CountNeeded == 0 {
		return nil, errors.New("count_needed must be greater than 0")
	}

	topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
	if err != nil {
		return nil, err
	}

	log.WithFields(logrus.Fields{
		"topic":      topic,
		"block_root": bytesutil.ToBytes32(req.BlockRoot),
		"count":      req.CountNeeded,
		"already":    len(req.AlreadyHave),
	}).Debug("Sending execution proofs by root request")

	stream, err := p2pProvider.Send(ctx, req, topic, pid)
	if err != nil {
		return nil, err
	}
	defer closeStream(stream, log)

	// Read execution proofs from stream
	proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
	alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
	for _, id := range req.AlreadyHave {
		alreadyHaveSet[id] = struct{}{}
	}

	for i := uint64(0); i < req.CountNeeded; i++ {
		isFirstChunk := i == 0
		proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, err
		}

		// Validate proof
		if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
			return nil, err
		}

		proofs = append(proofs, proof)
	}

	return proofs, nil
}

// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
	stream libp2pcore.Stream,
	encoding p2p.EncodingProvider,
	isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
	// Read status code for each chunk (like data columns, not like blocks)
	code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
	if err != nil {
		return nil, err
	}
	if code != 0 {
		return nil, errors.New(errMsg)
	}

	// Read context bytes (fork digest)
	_, err = readContextFromStream(stream)
	if err != nil {
		return nil, fmt.Errorf("read context from stream: %w", err)
	}

	// Decode the proof
	proof := &ethpb.ExecutionProof{}
	if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
		return nil, err
	}

	return proof, nil
}

// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
	proof *ethpb.ExecutionProof,
	req *ethpb.ExecutionProofsByRootRequest,
	alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
	// Check block root matches
	proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
	reqRoot := bytesutil.ToBytes32(req.BlockRoot)
	if proofRoot != reqRoot {
		return fmt.Errorf("proof block root %#x does not match requested root %#x",
			proofRoot, reqRoot)
	}

	// Check we didn't already have this proof
	if _, ok := alreadyHaveSet[proof.ProofId]; ok {
		return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
	}

	// Check proof ID is valid (within max range)
	if !proof.ProofId.IsValid() {
		return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
	}

	return nil
}

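validateExecutionProof above enforces three checks on each received chunk: matching block root, not already held, and an in-range proof id. A self-contained sketch of the same checks with simplified stand-in types; the id bound of 7 mirrors the "max valid is 7" note in the accompanying tests and is illustrative here:

package main

import (
	"errors"
	"fmt"
)

// proof is a simplified stand-in for ethpb.ExecutionProof.
type proof struct {
	blockRoot [32]byte
	proofID   uint64
}

const maxProofID = 7 // illustrative bound; the real check is ProofId.IsValid()

// checkProof mirrors validateExecutionProof: the proof must be for the
// requested root, must not be one we already hold, and its id must be in range.
func checkProof(p proof, wantRoot [32]byte, alreadyHave map[uint64]struct{}) error {
	if p.blockRoot != wantRoot {
		return errors.New("proof block root does not match requested root")
	}
	if _, ok := alreadyHave[p.proofID]; ok {
		return fmt.Errorf("received proof we already have: proof_id=%d", p.proofID)
	}
	if p.proofID > maxProofID {
		return fmt.Errorf("invalid proof_id: %d", p.proofID)
	}
	return nil
}

func main() {
	root := [32]byte{0x01}
	have := map[uint64]struct{}{1: {}}
	fmt.Println(checkProof(proof{blockRoot: root, proofID: 2}, root, have)) // <nil>
	fmt.Println(checkProof(proof{blockRoot: root, proofID: 1}, root, have)) // already-have error
}
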
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
	ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
	defer span.End()

	_, cancel := context.WithTimeout(ctx, ttfbTimeout)
	defer cancel()

	req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
	if !ok {
		return errors.New("message is not type ExecutionProofsByRootRequest")
	}

	remotePeer := stream.Conn().RemotePeer()
	SetRPCStreamDeadlines(stream)

	// Validate request
	if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
		return err
	}

	// Penalize peers that send invalid requests.
	if err := validateExecutionProofsByRootRequest(req); err != nil {
		s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
		s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
		return fmt.Errorf("validate execution proofs by root request: %w", err)
	}

	blockRoot := bytesutil.ToBytes32(req.BlockRoot)

	log := log.WithFields(logrus.Fields{
		"blockroot":   fmt.Sprintf("%#x", blockRoot),
		"neededCount": req.CountNeeded,
		"alreadyHave": req.AlreadyHave,
		"peer":        remotePeer.String(),
	})

	s.rateLimiter.add(stream, 1)
	defer closeStream(stream, log)

	// Get proofs from execution proof pool
	storedProofs := s.cfg.execProofPool.Get(blockRoot)

	// Filter out not requested proofs
	alreadyHave := make(map[primitives.ExecutionProofId]bool)
	for _, id := range req.AlreadyHave {
		alreadyHave[id] = true
	}

	// Send proofs
	sentCount := uint64(0)
	for _, proof := range storedProofs {
		if sentCount >= req.CountNeeded {
			break
		}

		// Skip proofs the requester already has
		if alreadyHave[proof.ProofId] {
			continue
		}

		// Write proof to stream
		SetStreamWriteDeadline(stream, defaultWriteDuration)
		if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
			log.WithError(err).Debug("Could not send execution proof")
			s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
			return err
		}

		sentCount++
	}

	log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")

	return nil
}

func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
	if req.CountNeeded == 0 {
		return errors.New("count_needed must be greater than 0")
	}
	return nil
}
@@ -1,727 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
chainMock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestExecutionProofsByRootRPCHandler(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
t.Run("wrong message type", func(t *testing.T) {
|
||||
service := &Service{}
|
||||
err := service.executionProofsByRootRPCHandler(t.Context(), nil, nil)
|
||||
require.ErrorContains(t, "message is not type ExecutionProofsByRootRequest", err)
|
||||
})
|
||||
|
||||
t.Run("invalid request - count_needed is 0", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
service := &Service{cfg: &config{p2p: localP2P}, rateLimiter: newRateLimiter(localP2P)}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, errMsg, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, responseCodeInvalidRequest, code)
|
||||
require.Equal(t, "count_needed must be greater than 0", errMsg)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 0, // Invalid: must be > 0
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zkVM disabled - returns empty", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: false, // Disabled
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
execProofPool := execproofs.NewPool()
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
execProofPool: execProofPool,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
// Should receive no proofs (stream should end)
|
||||
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no proofs available", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
execProofPool := execproofs.NewPool()
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
execProofPool: execProofPool,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
// Should receive no proofs (stream should end)
|
||||
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nominal - returns requested proofs", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with some proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
// Add 3 proofs for the same block
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
proof3 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(3),
|
||||
ProofData: []byte("proof3"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
execProofPool.Insert(proof3)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 2)
|
||||
|
||||
for i := range 2 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
// Should receive proof1 and proof2 (first 2 in pool)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("filters already_have proofs", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with some proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
// Add 4 proofs for the same block
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
proof3 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(3),
|
||||
ProofData: []byte("proof3"),
|
||||
}
|
||||
proof4 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(4),
|
||||
ProofData: []byte("proof4"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
execProofPool.Insert(proof3)
|
||||
execProofPool.Insert(proof4)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 2)
|
||||
|
||||
for i := range 3 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
// Should skip proof1 and proof2 (already_have), and return proof3 and proof4
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(3), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(4), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{1, 2}, // Already have proof1 and proof2
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("partial send - less proofs than requested", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with only 2 proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 5)
|
||||
|
||||
for i := range 5 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
// Should only receive 2 proofs (not 5 as requested)
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 5, // Request 5 but only 2 available
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateExecutionProofsByRootRequest(t *testing.T) {
|
||||
t.Run("invalid - count_needed is 0", func(t *testing.T) {
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
CountNeeded: 0,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
err := validateExecutionProofsByRootRequest(req)
|
||||
require.ErrorContains(t, "count_needed must be greater than 0", err)
|
||||
})
|
||||
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
err := validateExecutionProofsByRootRequest(req)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSendExecutionProofsByRootRequest(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
t.Run("count_needed is 0 - returns error", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 0,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "count_needed must be greater than 0", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("success - receives requested proofs", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
// Create proofs to send back
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
|
||||
// Setup remote to send proofs
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
|
||||
// Read the request (we don't validate it in this test)
|
||||
_ = ðpb.ExecutionProofsByRootRequest{}
|
||||
|
||||
// Send proof1
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
|
||||
// Send proof2
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof2))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("partial response - EOF before count_needed", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
// Setup remote to send only 1 proof (but we request 5)
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
// Send only proof1
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 5, // Request 5 but only get 1
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(proofs)) // Only received 1
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
})
|
||||
|
||||
t.Run("invalid block root - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
requestedRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
wrongRoot := [32]byte{0xFF, 0xFF, 0xFF}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
// Create proof with wrong block root
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: wrongRoot[:], // Wrong root!
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: requestedRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "does not match requested root", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("already_have proof - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{1}, // Already have proof_id 1
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "received proof we already have", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("invalid proof_id - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(255), // Invalid proof_id (max valid is 7)
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "invalid proof_id", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
}
|
||||
@@ -23,7 +23,6 @@ import (
	lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -68,7 +67,6 @@ const (
	seenProposerSlashingSize = 100
	badBlockSize             = 1000
	syncMetricsInterval      = 10 * time.Second
	seenExecutionProofSize   = 100
)

var (
@@ -96,7 +94,6 @@ type config struct {
	slashingPool  slashings.PoolManager
	syncCommsPool synccommittee.Pool
	blsToExecPool blstoexec.PoolManager
	execProofPool execproofs.PoolManager
	chain         blockchainService
	initialSync   Checker
	blockNotifier blockfeed.Notifier
@@ -238,6 +235,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
	r.subHandler = newSubTopicHandler()
	r.rateLimiter = newRateLimiter(r.cfg.p2p)
	r.initCaches()

	return r
}

@@ -329,17 +329,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
			getSubnetsRequiringPeers: s.allDataColumnSubnets,
		})
	})

	if features.Get().EnableZkvm {
		s.spawn(func() {
			s.subscribe(
				p2p.ExecutionProofSubnetTopicFormat,
				s.validateExecutionProof,
				s.executionProofSubscriber,
				nse,
			)
		})
	}
	}
	return true
}

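registerSubscribers above only wires the execution-proof gossip topic when the zkVM feature flag is enabled. A minimal sketch of that feature-gated registration; the topic name and callback shapes are illustrative, not Prysm's actual subscribe signature:

package main

import "fmt"

// subscription is a simplified stand-in for a gossip topic registration.
type subscription struct {
	topic     string
	validate  func(msg []byte) bool
	subscribe func(msg []byte)
}

// registerZkvmTopics mirrors the shape of the registerSubscribers change:
// the execution-proof topic is only wired up when the zkVM feature flag is set.
func registerZkvmTopics(enableZkvm bool, register func(subscription)) {
	if !enableZkvm {
		return
	}
	register(subscription{
		topic:     "execution_proof_subnet", // illustrative name, not the real topic format
		validate:  func(msg []byte) bool { return len(msg) > 0 },
		subscribe: func(msg []byte) { fmt.Printf("got %d bytes\n", len(msg)) },
	})
}

func main() {
	registerZkvmTopics(true, func(s subscription) { fmt.Println("registered", s.topic) })
	registerZkvmTopics(false, func(s subscription) { fmt.Println("registered", s.topic) }) // no-op
}
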
@@ -11,7 +11,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v7/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +22,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"google.golang.org/protobuf/proto"
)

@@ -71,49 +69,12 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
		}
		return err
	}

	// We use the service context to ensure this context is not cancelled
	// when the current function returns.
	// TODO: Do not broadcast proofs for blocks we have already seen.
	go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)

	if err := s.processPendingAttsForBlock(ctx, root); err != nil {
		return errors.Wrap(err, "process pending atts for block")
	}

	return nil
}

func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
	const delay = 2 * time.Second
	proofTypes := flags.Get().ProofGenerationTypes

	if len(proofTypes) == 0 {
		return
	}

	var wg errgroup.Group
	for _, proofType := range proofTypes {
		wg.Go(func() error {
			execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
			if err != nil {
				return fmt.Errorf("generate exec proof: %w", err)
			}

			if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
				return fmt.Errorf("broadcast exec proof: %w", err)
			}

			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		log.WithError(err).Error("Failed to generate and broadcast execution proofs")
	}
}

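generateAndBroadcastExecutionProofs fans out one goroutine per configured proof type through an errgroup and surfaces the first failure from Wait. A condensed sketch of that pattern (illustrative helper names; requires the golang.org/x/sync/errgroup module used by the code above):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// generateProof is an illustrative stand-in for generateExecProof in the diff above.
func generateProof(proofType uint64) ([]byte, error) {
	return []byte(fmt.Sprintf("proof-%d", proofType)), nil
}

// generateAndBroadcast mirrors the errgroup fan-out: one goroutine per
// configured proof type, with the first error surfaced by Wait.
func generateAndBroadcast(ctx context.Context, proofTypes []uint64, broadcast func(context.Context, []byte) error) error {
	var g errgroup.Group
	for _, pt := range proofTypes {
		g.Go(func() error {
			p, err := generateProof(pt)
			if err != nil {
				return fmt.Errorf("generate exec proof: %w", err)
			}
			return broadcast(ctx, p)
		})
	}
	return g.Wait()
}

func main() {
	err := generateAndBroadcast(context.Background(), []uint64{1, 2}, func(_ context.Context, p []byte) error {
		fmt.Println("broadcast", string(p))
		return nil
	})
	fmt.Println("err:", err)
}
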
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
|
||||
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
|
||||
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {
|
||||
@@ -228,30 +189,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
|
||||
defer cancel()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"type": source.Type(),
|
||||
})
|
||||
|
||||
var constructedSidecarCount uint64
|
||||
for iteration := uint64(0); ; /*no stop condition*/ iteration++ {
|
||||
log = log.WithField("iteration", iteration)
|
||||
|
||||
// Exit early if all sidecars to sample have been seen.
|
||||
if s.haveAllSidecarsBeenSeen(source.Slot(), source.ProposerIndex(), columnIndicesToSample) {
|
||||
if iteration > 0 && constructedSidecarCount == 0 {
|
||||
log.Debug("No data column sidecars constructed from the execution client")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if iteration == 0 {
|
||||
dataColumnsRecoveredFromELAttempts.Inc()
|
||||
}
|
||||
|
||||
// Try to reconstruct data column constructedSidecars from the execution client.
|
||||
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
|
||||
if err != nil {
|
||||
@@ -259,8 +202,8 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
}
|
||||
|
||||
// No sidecars are retrieved from the EL, retry later
|
||||
constructedSidecarCount = uint64(len(constructedSidecars))
|
||||
if constructedSidecarCount == 0 {
|
||||
sidecarCount := uint64(len(constructedSidecars))
|
||||
if sidecarCount == 0 {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
@@ -269,11 +212,9 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
continue
|
||||
}
|
||||
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
|
||||
// Boundary check.
|
||||
if constructedSidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
|
||||
if sidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, fieldparams.NumberOfColumns)
|
||||
}
|
||||
|
||||
unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, source.Slot(), source.ProposerIndex(), columnIndicesToSample, constructedSidecars)
|
||||
@@ -281,12 +222,19 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
if len(unseenIndices) > 0 {
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
|
||||
dataColumnSidecarsObtainedViaELCount.Observe(float64(len(unseenIndices)))
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"iteration": iteration,
|
||||
"type": source.Type(),
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -1,31 +0,0 @@
package sync

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
	opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
	"google.golang.org/protobuf/proto"
)

func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
	executionProof, ok := msg.(*ethpb.ExecutionProof)
	if !ok {
		return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.ExecutionProof{}, msg)
	}

	// Insert the execution proof into the pool
	s.cfg.execProofPool.Insert(executionProof)

	// Notify subscribers about the new execution proof
	s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
		Type: opfeed.ExecutionProofReceived,
		Data: &opfeed.ExecutionProofReceivedData{
			ExecutionProof: executionProof,
		},
	})

	return nil
}
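The deleted subscriber above inserts each gossiped proof into the pool and then publishes an ExecutionProofReceived event on the operation feed. A simplified sketch of that store-then-notify flow, using a plain channel in place of the event feed:

package main

import "fmt"

// event is a simplified stand-in for the operation feed event sent above.
type event struct {
	kind  string
	proof []byte
}

// pool is a toy stand-in for the execution proof pool.
type pool struct{ proofs [][]byte }

func (p *pool) insert(proof []byte) { p.proofs = append(p.proofs, proof) }

// handleGossipProof mirrors the subscriber: store the proof, then notify
// any listeners through a feed (here a plain channel).
func handleGossipProof(p *pool, feed chan<- event, proof []byte) {
	p.insert(proof)
	feed <- event{kind: "ExecutionProofReceived", proof: proof}
}

func main() {
	feed := make(chan event, 1)
	p := &pool{}
	handleGossipProof(p, feed, []byte("proof"))
	fmt.Println(len(p.proofs), (<-feed).kind) // 1 ExecutionProofReceived
}
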
@@ -3,12 +3,11 @@ package sync
import (
"context"
"fmt"
"slices"
"math"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -52,12 +51,14 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
return pubsub.ValidationReject, err
}

// Reject messages that are not of the expected type.
dcsc, ok := m.(*eth.DataColumnSidecar)
if !ok {
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
return pubsub.ValidationReject, errWrongMessage
}

@@ -193,13 +194,19 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))

peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)

select {
case s.dataColumnLogCh <- dataColumnLogEntry{
slot: roDataColumn.Slot(),
index: roDataColumn.Index,
root: roDataColumn.BlockRoot(),
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
Slot: roDataColumn.Slot(),
ColIdx: roDataColumn.Index,
PropIdx: roDataColumn.ProposerIndex(),
BlockRoot: roDataColumn.BlockRoot(),
ParentRoot: roDataColumn.ParentRoot(),
PeerSuffix: pid.String()[len(pid.String())-6:],
PeerGossipScore: peerGossipScore,
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
}:
default:
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
@@ -244,69 +251,68 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
}

type dataColumnLogEntry struct {
slot primitives.Slot
index uint64
root [32]byte
validationTime time.Duration
sinceStartTime time.Duration
Slot primitives.Slot
ColIdx uint64
PropIdx primitives.ValidatorIndex
BlockRoot [32]byte
ParentRoot [32]byte
PeerSuffix string
PeerGossipScore float64
validationTime time.Duration
sinceStartTime time.Duration
}

func (s *Service) processDataColumnLogs() {
ticker := time.NewTicker(1 * time.Second)
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
defer ticker.Stop()

slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)

for {
select {
case col := <-s.dataColumnLogCh:
cols := slotStats[col.root]
cols = append(cols, col)
slotStats[col.root] = cols
case entry := <-s.dataColumnLogCh:
cols := slotStats[entry.Slot]
cols[entry.ColIdx] = entry
slotStats[entry.Slot] = cols
case <-ticker.C:
for root, columns := range slotStats {
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)
for slot, columns := range slotStats {
var (
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
peers = make([]string, 0, fieldparams.NumberOfColumns)
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
)

totalReceived := 0
for _, column := range columns {
indices = append(indices, column.index)

sumValidationTime += column.validationTime
sumSinceStartTime += column.sinceStartTime

if totalReceived == 0 {
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
totalReceived++
for _, entry := range columns {
if entry.PeerSuffix == "" {
continue
}

minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
colIndices = append(colIndices, entry.ColIdx)
peers = append(peers, entry.PeerSuffix)
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))
totalReceived++
}

if totalReceived > 0 {
slices.Sort(indices)
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)

log.WithFields(logrus.Fields{
"slot": columns[0].slot,
"root": fmt.Sprintf("%#x", root),
"count": totalReceived,
"indices": helpers.PrettySlice(indices),
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
}).Debug("Accepted data column sidecars summary")
}
log.WithFields(logrus.Fields{
"slot": slot,
"receivedCount": totalReceived,
"columnIndices": colIndices,
"peers": peers,
"gossipScores": gossipScores,
"validationTimes": validationTimes,
"sinceStartTimes": sinceStartTimes,
}).Debug("Accepted data column sidecars summary")
}

slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
}
}
}

func prettyMinMaxAverage(min, max, average time.Duration) string {
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
func roundFloat(f float64, decimals int) float64 {
mult := math.Pow(10, float64(decimals))
return math.Round(f*mult) / mult
}

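For context on the aggregation change above: log entries are now bucketed per slot into a fixed-size array indexed by column index, and unfilled cells are skipped via the empty PeerSuffix sentinel. A minimal, self-contained Go sketch of that pattern (illustrative only; numberOfColumns and the entry fields here are simplified stand-ins, not Prysm's types):

package main

import (
	"fmt"
	"time"
)

const numberOfColumns = 128 // stand-in for fieldparams.NumberOfColumns

type entry struct {
	ColIdx         uint64
	PeerSuffix     string
	validationTime time.Duration
}

func main() {
	slotStats := make(map[uint64][numberOfColumns]entry)

	// Bucket two received entries for slot 42 by their column index.
	for _, e := range []entry{
		{ColIdx: 3, PeerSuffix: "abc123", validationTime: 5 * time.Millisecond},
		{ColIdx: 7, PeerSuffix: "def456", validationTime: 9 * time.Millisecond},
	} {
		cols := slotStats[42] // the array value is copied out of the map
		cols[e.ColIdx] = e    // place the entry at its column index
		slotStats[42] = cols  // write the updated array back
	}

	// Summarize, skipping cells that were never filled.
	for slot, cols := range slotStats {
		received := 0
		for _, e := range cols {
			if e.PeerSuffix == "" {
				continue
			}
			received++
		}
		fmt.Printf("slot %d: received %d columns\n", slot, received)
	}
}
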
@@ -1,132 +0,0 @@
package sync

import (
"context"
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
)

func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
// Always accept messages our own messages.
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}

// Ignore messages during initial sync.
if s.cfg.initialSync.Syncing() {
return pubsub.ValidationIgnore, nil
}

// Reject messages with a nil topic.
if msg.Topic == nil {
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}

// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
return pubsub.ValidationReject, err
}

// Reject messages that are not of the expected type.
executionProof, ok := m.(*ethpb.ExecutionProof)
if !ok {
log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
return pubsub.ValidationReject, errWrongMessage
}

// 1. Verify proof is not from the future
if err := s.proofNotFromFutureSlot(executionProof); err != nil {
return pubsub.ValidationReject, err
}

// 2. Verify proof slot is greater than finalized slot
if err := s.proofAboveFinalizedSlot(ctx, executionProof); err != nil {
return pubsub.ValidationReject, err
}

// 3. Check if the proof is already in the DA checker cache (execution proof pool)
// If it exists in the cache, we know it has already passed validation.
blockRoot := bytesutil.ToBytes32(executionProof.BlockRoot)
if s.isProofCachedInPool(blockRoot, executionProof.ProofId) {
return pubsub.ValidationIgnore, nil
}

// 4. Verify proof size limits
if uint64(len(executionProof.ProofData)) > params.BeaconConfig().MaxProofDataBytes {
return pubsub.ValidationReject, fmt.Errorf("execution proof data size %d exceeds maximum allowed %d", len(executionProof.ProofData), params.BeaconConfig().MaxProofDataBytes)
}

// 5. Run zkVM proof verification
if err := s.verifyExecutionProof(executionProof); err != nil {
return pubsub.ValidationReject, err
}

// Validation successful, return accept
return pubsub.ValidationAccept, nil
}

// TODO: Do we need encapsulation for all those verification functions?

// proofNotFromFutureSlot checks whether the execution proof is from a future slot.
func (s *Service) proofNotFromFutureSlot(executionProof *ethpb.ExecutionProof) error {
currentSlot := s.cfg.clock.CurrentSlot()
proofSlot := executionProof.Slot

if currentSlot == proofSlot {
return nil
}

earliestStart, err := s.cfg.clock.SlotStart(proofSlot)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to compute start time for proof slot %d: %w", proofSlot, err)
}

earliestStart = earliestStart.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
// If the system time is still before earliestStart, we consider the proof from a future slot and return an error.
if s.cfg.clock.Now().Before(earliestStart) {
return fmt.Errorf("slot %d is too far in the future (current slot: %d)", proofSlot, currentSlot)
}
return nil
}

// proofAboveFinalizedSlot checks whether the execution proof's slot is after the finalized slot.
func (s *Service) proofAboveFinalizedSlot(ctx context.Context, executionProof *ethpb.ExecutionProof) error {
finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to get finalized checkpoint: %w", err)
}

fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to compute start slot for finalized epoch %d: %w", finalizedCheckpoint.Epoch, err)
}

if executionProof.Slot <= fSlot {
return fmt.Errorf("execution proof slot %d is not after finalized slot %d", executionProof.Slot, fSlot)
}
return nil
}

// isProofCachedInPool checks if the execution proof is already present in the pool.
func (s *Service) isProofCachedInPool(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
return s.cfg.execProofPool.Exists(blockRoot, proofId)
}

// verifyExecutionProof performs the actual verification of the execution proof.
func (s *Service) verifyExecutionProof(_ *ethpb.ExecutionProof) error {
// For now, say all proof are valid.
return nil
}
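The deleted proofNotFromFutureSlot above tolerates a small amount of clock skew by shifting the slot's earliest start time back by the gossip clock disparity before comparing against the local clock. A rough standalone sketch of that check, with assumed constants rather than the Prysm clock and params APIs:

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot = 12 * time.Second       // assumed slot duration
	clockDisparity = 500 * time.Millisecond // assumed maximum gossip clock disparity
)

// notFromFutureSlot returns an error if, even after allowing for clockDisparity,
// the local clock has not yet reached the start of the given slot.
func notFromFutureSlot(genesis time.Time, slot uint64, now time.Time) error {
	earliestStart := genesis.Add(time.Duration(slot) * secondsPerSlot)
	earliestStart = earliestStart.Add(-clockDisparity)
	if now.Before(earliestStart) {
		return fmt.Errorf("slot %d is too far in the future", slot)
	}
	return nil
}

func main() {
	genesis := time.Now().Add(-100 * secondsPerSlot)          // pretend the chain is ~100 slots old
	fmt.Println(notFromFutureSlot(genesis, 100, time.Now())) // <nil>
	fmt.Println(notFromFutureSlot(genesis, 200, time.Now())) // error: slot 200 is in the future
}
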
@@ -1,408 +0,0 @@
package sync

import (
"bytes"
"context"
"fmt"
"testing"
"time"

mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
testingdb "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
)

func TestValidateExecutionProof(t *testing.T) {
beaconDB := testingdb.SetupDB(t)
p2pService := mockp2p.NewTestP2P(t)
ctx := context.Background()

fcp := &ethpb.Checkpoint{
Epoch: 1,
}

require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Root: params.BeaconConfig().ZeroHash[:],
Slot: 0,
}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, fcp))

defaultTopic := p2p.ExecutionProofSubnetTopicFormat + "/" + encoder.ProtocolSuffixSSZSnappy
fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E}

chainService := &mock.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
FinalizedCheckPoint: fcp,
}

currentSlot := primitives.Slot(100)
genesisTime := time.Now().Add(-time.Duration(uint64(currentSlot)*params.BeaconConfig().SecondsPerSlot) * time.Second)

tests := []struct {
name string
setupService func() *Service
proof *ethpb.ExecutionProof
topic *string
pid peer.ID
want pubsub.ValidationResult
wantErr bool
}{
{
name: "Ignore when syncing",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: true},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Reject nil topic",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: nil,
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof from future slot",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot + 1000, // Far future slot
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof below finalized slot",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: primitives.Slot(5), // Before finalized epoch 1
ProofId: 1,
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Ignore already seen proof",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Ignore proof already in pool",
setupService: func() *Service {
pool := execproofs.NewPool()
pool.Insert(&ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
})

s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: pool,
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Reject proof if no verifier found",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof if verification fails",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Accept valid proof",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationAccept,
wantErr: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.setupService()

// Create pubsub message
buf := new(bytes.Buffer)
_, err := p2pService.Encoding().EncodeGossip(buf, tt.proof)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
Topic: tt.topic,
},
}

// Validate
result, err := s.validateExecutionProof(ctx, tt.pid, msg)

if tt.wantErr {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}

assert.Equal(t, tt.want, result)

// If validation accepted, check that ValidatorData is set
if result == pubsub.ValidationAccept {
assert.NotNil(t, msg.ValidatorData)
validatedProof, ok := msg.ValidatorData.(*ethpb.ExecutionProof)
assert.Equal(t, true, ok)

// Check that the validated proof matches the original
assert.Equal(t, tt.proof.ProofId, validatedProof.ProofId)
assert.Equal(t, tt.proof.Slot, validatedProof.Slot)
assert.DeepEqual(t, tt.proof.BlockRoot, validatedProof.BlockRoot)
assert.DeepEqual(t, tt.proof.BlockHash, validatedProof.BlockHash)
assert.DeepEqual(t, tt.proof.ProofData, validatedProof.ProofData)
}
})
}
}

type alwaysFailVerifier struct{}

func (v *alwaysFailVerifier) Verify(proof *ethpb.ExecutionProof) (bool, error) {
return false, nil
}

func (v *alwaysFailVerifier) GetProofId() primitives.ExecutionProofId {
return primitives.ExecutionProofId(1)
}
@@ -54,13 +54,11 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
cfg.FuluForkEpoch = 6
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -103,10 +101,7 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
}

for _, test := range tests {
for v := range version.All() {
if v == version.Phase0 {
continue
}
for v := 1; v < 6; v++ {
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
@@ -185,13 +180,11 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
cfg.FuluForkEpoch = 6
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -234,10 +227,7 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
}

for _, test := range tests {
for v := range version.All() {
if v == version.Phase0 {
continue
}
for v := 1; v < 6; v++ {
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
ctx := t.Context()
p := p2ptest.NewTestP2P(t)

@@ -687,12 +687,6 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
}}
}

func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return st, nil
}}
}

func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
return sbrForValOverrideWithT(nil, idx, val)
}

@@ -11,10 +11,12 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -359,7 +361,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
}

if !dv.fc.HasNode(parentRoot) {
return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
return columnErrBuilder(errSidecarParentNotSeen)
}
}

@@ -482,19 +484,88 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e

defer dv.recordResult(RequireSidecarProposerExpected, &err)

for _, dataColumn := range dv.dataColumns {
dataColumnSlot := dataColumn.Slot()
type slotParentRoot struct {
slot primitives.Slot
parentRoot [fieldparams.RootLength]byte
}

// Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "verifying state"))
targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)

var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
// Use cached values if available.
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
return root, nil
}

// Use proposer lookahead directly
idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(slot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}

// Compute the target root for the epoch.
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
}

// Store the target root in the cache.
targetRootBySlotParentRoot[slotParentRoot] = targetRoot

return targetRoot, nil
}

for _, dataColumn := range dv.dataColumns {
// Extract the slot of the data column.
dataColumnSlot := dataColumn.Slot()

// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()

// Compute the target root for the data column.
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "target root"))
}

// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}

// Create a checkpoint for the target root.
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}

// Try to extract the proposer index from the data column in the cache.
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)

if !cached {
parentRoot := dataColumn.ParentRoot()
// Ensure the expensive index computation is only performed once for
// concurrent requests for the same signature data.
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
}

idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
}

return idx, nil
})
if err != nil {
return err
}

var ok bool
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
}
}

if idx != dataColumn.ProposerIndex() {
@@ -555,3 +626,7 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {

return sha256.Sum256(unhashedKey), nil
}

func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
return string(root[:]) + fmt.Sprintf("%d", slot)
}

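The SidecarProposerExpected change above deduplicates the expensive proposer-index computation across concurrent callers through a singleflight-style group keyed by concatRootSlot(parentRoot, slot). A standalone sketch of that pattern, assuming golang.org/x/sync/singleflight (the concrete type of the dv.sg field is not shown in this diff):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

// concatRootSlot mirrors the key format used above: raw root bytes followed by the decimal slot.
func concatRootSlot(root [32]byte, slot uint64) string {
	return string(root[:]) + fmt.Sprintf("%d", slot)
}

func main() {
	var (
		sg           singleflight.Group
		computations atomic.Int64
	)

	// Stand-in for the expensive proposer-index computation.
	compute := func() (any, error) {
		computations.Add(1)
		time.Sleep(50 * time.Millisecond)
		return uint64(7), nil
	}

	key := concatRootSlot([32]byte{1, 2, 3}, 3210)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			idx, _, _ := sg.Do(key, compute)
			_ = idx.(uint64)
		}()
	}
	wg.Wait()

	// Overlapping callers share a single computation, so this is expected to print 1.
	fmt.Println("computations:", computations.Load())
}
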
@@ -2,6 +2,7 @@ package verification

import (
"reflect"
"sync"
"testing"
"time"

@@ -794,90 +795,87 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
blobCount = 1
)

ctx := t.Context()
parentRoot := [fieldparams.RootLength]byte{}

// Create a Fulu state to get the expected proposer from the lookahead.
fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
expectedProposer, err := fuluState.ProposerLookahead()
require.NoError(t, err)
expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])

// Generate data columns with the expected proposer index.
matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
// Generate data columns with wrong proposer index.
wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)

t.Run("Proposer matches", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
firstColumn := columns[0]
ctx := t.Context()
testCases := []struct {
name string
stateByRooter StateByRooter
proposerCache proposerCache
columns []blocks.RODataColumn
error string
}{
{
name: "Cached, matches",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
},
}

verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
})

t.Run("Proposer does not match", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
columns: columns,
},
{
name: "Cached, does not match",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
},
}

verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})

t.Run("State lookup failure", func(t *testing.T) {
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
initializer := Initializer{
shared: &sharedResources{
sr: sbrNotFound(t, columns[0].ParentRoot()),
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{},
columns: columns,
error: errSidecarUnexpectedProposer.Error(),
},
{
name: "Not cached, state lookup failure",
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
},
}

verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, "verifying state", err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})
}

func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
blobs := make([]kzg.Blob, 0, len(roBlobs))
for i := range roBlobs {
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
columns: columns,
error: "verifying state",
},
}

cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: tc.stateByRooter,
pc: tc.proposerCache,
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}

return roDataColumnSidecars
verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
var wg sync.WaitGroup

var err1, err2 error
wg.Go(func() {
err1 = verifier.SidecarProposerExpected(ctx)
})
wg.Go(func() {
err2 = verifier.SidecarProposerExpected(ctx)
})
wg.Wait()

require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))

if len(tc.error) > 0 {
require.ErrorContains(t, tc.error, err1)
require.ErrorContains(t, tc.error, err2)
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
return
}

require.NoError(t, err1)
require.NoError(t, err2)
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))

err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
})
}
}

func TestColumnRequirementSatisfaction(t *testing.T) {
@@ -924,3 +922,12 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
require.NoError(t, err)
}

func TestConcatRootSlot(t *testing.T) {
root := [fieldparams.RootLength]byte{1, 2, 3}
const slot = primitives.Slot(3210)

const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"

actual := concatRootSlot(root, slot)
require.Equal(t, expected, actual)
}

@@ -1,3 +0,0 @@
## Fixed

- Fix missing return after version header check in SubmitAttesterSlashingsV2.
@@ -1,3 +0,0 @@
### Fixed

- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs.
@@ -1,3 +0,0 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.
@@ -1,3 +0,0 @@
### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.
@@ -1,3 +0,0 @@
### Fixed

- Fix deadlock in data column gossip KZG batch verification when a caller times out preventing result delivery.
@@ -1,3 +0,0 @@
### Changed

- the /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees now returns a 503 error if the node is still syncing, the rest api is also working in a similar process to gRPC broadcasting immediately now.