Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-22 11:48:13 -05:00)

Compare commits: state-desi...poc/option (90 commits)
Commits in this range:

66f63aee9c, 698b6922f0, ca228fca44, 0db74365e0, 6f90101364, 49e1763ec2,
c2527c82cd, d4ea8fafd6, 07d1d6bdf9, f938da99d9, 9deec69cc7, 2767f08f4d,
4d6663b4de, e713560a68, 4571e50609, 175738919e, f1cbdc9fa6, 156383c9c8,
d46c620783, dd05e44ef3, 9da36a5de6, 7950a24926, ea51253be9, 2ac30f5ce6,
5ede7c8fe0, 7418c00ad6, 66342655fd, 18eca953c1, 8191bb5711, d4613aee0c,
9fcc1a7a77, 3324c7b655, d477bcfa20, 38183471da, 3c3e2b42e9, d496f7bfab,
55e2663f82, 75dea214ac, 4374e709cb, be300f80bd, 096cba5b2d, d5127233e4,
3d35cc20ec, 5f0afd09c6, 95fff68b11, d0bc0fcda8, 8b2acd5f47, fb071ebe20,
a174d0cd53, 06655dcd1f, c1dcf97c0c, f596223096, a184afdfb4, 056843bcae,
a587a9dd6e, dde9dc3dd9, 960d666801, 1468c20c54, 68d8988121, 9ca5bf0119,
bf8f494792, cab25267b5, b9c23dae89, 7944731ccf, 4d2a61a2e0, 8708c198c9,
2857eeae6e, 4912c29d06, d520158510, c13d61a959, f5c61ebaea, ae3d465615,
f23210853d, 6dc49b41f2, e56550af48, 20f617ecc9, adb1de9caa, 2d9e6ad2c8,
e8eb022145, 38be9400f1, b01e760e0a, da4a8f1dd3, 0dca170953, cd549abbfa,
28a661518e, 4ab5888c4c, 0d818bc687, 0e90a0f2d8, 2de069d543, 50e88045bb

@@ -193,6 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

@@ -48,6 +48,7 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -65,6 +66,7 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -146,6 +148,8 @@ go_test(
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
if e == stateEpoch {
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch {
if e > stateEpoch && !fuluAndNextEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {

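The hunk above extends the epoch check in getPayloadAttribute: the tracked-proposer lookup now also runs when the state is Fulu or later and the proposal slot falls in the epoch immediately after the state's epoch, instead of only when both epochs match. A minimal, self-contained sketch of that comparison; the helper name, plain integer types, and the hard-coded 32-slot epoch are illustrative only and not part of the diff:

```go
package main

import "fmt"

const slotsPerEpoch = 32

// canUseStateDirectly mirrors the condition added in the hunk above:
// pre-Fulu the state must already be in the proposal epoch; from Fulu
// onward the next epoch is also acceptable, so the proposer lookup can
// happen before advancing the state.
func canUseStateDirectly(isFulu bool, proposalSlot, stateSlot uint64) bool {
	e := proposalSlot / slotsPerEpoch
	stateEpoch := stateSlot / slotsPerEpoch
	if e == stateEpoch {
		return true
	}
	return isFulu && e == stateEpoch+1
}

func main() {
	fmt.Println(canUseStateDirectly(false, 65, 63)) // false: pre-Fulu, next epoch
	fmt.Println(canUseStateDirectly(true, 65, 63))  // true: Fulu, next epoch
}
```
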
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}

func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx

st, _ := util.DeterministicGenesisStateBellatrix(t, 1)

service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

@@ -12,6 +12,7 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -53,58 +54,53 @@ type fcuConfig struct {
}

// sendFCU handles the logic to notify the engine of a forckhoice update
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions .
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}

// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
}

// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
log.WithError(err).Error("Could not notify forkchoice update")
}

if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}

// shouldOverrideFCU checks whether the incoming block is still subject to being

@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)

payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)
}

func TestShouldOverrideFCU(t *testing.T) {

@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -13,6 +14,7 @@ import (
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -136,6 +138,14 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
}

// WithExecProofsPool to keep track of execution proofs.
func WithExecProofsPool(p execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.ExecProofsPool = p
return nil
}
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Accessor) Option {
return func(s *Service) error {
@@ -266,3 +276,10 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}

func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}

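The new WithExecProofsPool and WithOperationNotifier options above follow the functional-options pattern this options file is built around: each option is a closure that mutates the service configuration and may return an error, so construction can fail fast on bad input. A small standalone sketch of the pattern, using made-up names rather than the Prysm types:

```go
package main

import "fmt"

type service struct {
	pool     string
	notifier string
}

// option mirrors the shape used in the diff: a closure over the value to
// set, applied to the service under construction.
type option func(*service) error

func withPool(p string) option {
	return func(s *service) error { s.pool = p; return nil }
}

func withNotifier(n string) option {
	return func(s *service) error { s.notifier = n; return nil }
}

func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withPool("execproofs"), withNotifier("operations"))
	fmt.Println(s, err) // &{execproofs operations} <nil>
}
```
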
@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}
@@ -43,7 +45,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil

@@ -170,12 +170,13 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -197,12 +198,13 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -227,6 +229,7 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -235,8 +238,9 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {

service.head = &head{
root: [32]byte{'T'},
state: s,
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -263,6 +267,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -270,7 +275,8 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'T'},
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}
@@ -287,12 +293,13 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

@@ -7,6 +7,8 @@ import (

"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -15,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -66,9 +69,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -105,12 +105,16 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
}

return nil
@@ -295,14 +299,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

@@ -330,6 +326,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}

// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -359,7 +356,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -372,7 +371,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()

@@ -666,10 +665,17 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}

root := roBlock.Root()
blockVersion := block.Version()
root, blockVersion := roBlock.Root(), roBlock.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
if err := s.areExecutionProofsAvailable(ctx, root); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}

if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}

return nil
}

if blockVersion >= version.Deneb {
@@ -679,6 +685,67 @@ func (s *Service) isDataAvailable(
return nil
}

// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if the generation proof is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
return nil
}

requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"requiredProofCount": requiredProofCount,
})

// Subscribe to execution proof received events.
eventsChan := make(chan *feed.Event, 1)
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
defer subscription.Unsubscribe()

// Return early if we already have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}

// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case event := <-eventsChan:
if event.Type != operation.ExecutionProofReceived {
continue
}

proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
if !ok {
log.Error("Could not cast event data to ExecutionProofReceivedData")
continue
}

proof := proofWrapper.ExecutionProof

// Skip if the proof is for a different block.
if bytesutil.ToBytes32(proof.BlockRoot) != blockRoot {
continue
}

// Return if we have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}

// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(

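The new areExecutionProofsAvailable helper above follows a subscribe-then-recheck pattern: it subscribes to the operation feed first, checks the proof pool once, and then loops over incoming events, re-counting until enough proofs arrive or the context expires. A self-contained sketch of that waiting pattern using plain channels; the function and types here are illustrative, not Prysm APIs:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// waitForCount checks the current count once, then blocks on notifications
// until count() reaches required or ctx is done. The caller should subscribe
// to notifications before the first check so nothing is missed in between,
// mirroring the ordering used in the diff above.
func waitForCount(ctx context.Context, notify <-chan struct{}, count func() int, required int) error {
	if count() >= required {
		return nil
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case _, ok := <-notify:
			if !ok {
				return errors.New("notification channel closed")
			}
			if count() >= required {
				return nil
			}
		}
	}
}

func main() {
	var n atomic.Int64
	notify := make(chan struct{}, 1)
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(10 * time.Millisecond)
			n.Add(1) // increment before notifying so the reader never undercounts
			notify <- struct{}{}
		}
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForCount(ctx, notify, func() int { return int(n.Load()) }, 3)) // <nil>
}
```
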
@@ -912,8 +979,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -926,14 +991,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
}
// return early if we already started building a block for the current
// head root
@@ -963,6 +1046,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")

@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
log.WithError(err).Error("Could not update next slot state cache")
return
}
if !slots.IsEpochEnd(slot) {
return nil
return
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}

@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.

@@ -13,6 +13,8 @@ import (
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -738,7 +740,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -788,7 +792,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -816,25 +822,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}

@@ -866,7 +856,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1339,7 +1331,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1351,7 +1345,9 @@
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1363,7 +1359,9 @@
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1375,7 +1373,9 @@
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1400,197 +1400,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}

// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")

parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)

// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
// not finish importing) one and that the node is optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)

// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}

// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns

@@ -1642,7 +1451,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {
@@ -1662,8 +1473,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 12; i < 18; i++ {
@@ -1684,8 +1496,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1708,7 +1521,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1718,6 +1533,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1768,7 +1587,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1835,7 +1656,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {
@@ -1856,8 +1679,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

// import the merge block
@@ -1877,7 +1701,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1906,8 +1732,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1975,7 +1802,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -2000,7 +1829,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -2028,7 +1859,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -2072,7 +1905,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")

for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
@@ -2089,7 +1921,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {
@@ -2109,8 +1943,9 @@
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -2130,7 +1965,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload hash of this block since it will
// be the LVH (latest valid hash)
|
||||
@@ -2161,7 +1998,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2282,7 +2121,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2348,7 +2189,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2631,7 +2474,10 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2732,7 +2578,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2766,7 +2614,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -3152,6 +3003,113 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
|
||||
// Enable zkVM feature
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
// Set MinProofsRequired for testing
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.MinProofsRequired = 3
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Setup with sufficient data columns
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// Insert MinProofsRequired execution proofs into the pool
|
||||
for i := range cfg.MinProofsRequired {
|
||||
proof := ðpb.ExecutionProof{
|
||||
BlockRoot: root[:],
|
||||
Slot: signed.Block().Slot(),
|
||||
ProofId: primitives.ExecutionProofId(i),
|
||||
ProofData: []byte{byte(i)},
|
||||
}
|
||||
service.cfg.ExecProofsPool.Insert(proof)
|
||||
}
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
|
||||
// Enable zkVM feature
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
// Set MinProofsRequired for testing
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.MinProofsRequired = 3
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Setup with sufficient data columns
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{
|
||||
WithOperationNotifier(&mock.MockOperationNotifier{}),
|
||||
},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// Goroutine to send execution proofs after data columns are available
|
||||
go func() {
|
||||
// Wait a bit to simulate async proof arrival
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Send ExecutionProofReceived events
|
||||
opfeed := service.cfg.OperationNotifier.OperationFeed()
|
||||
for i := range cfg.MinProofsRequired {
|
||||
proof := ðpb.ExecutionProof{
|
||||
BlockRoot: root[:],
|
||||
Slot: signed.Block().Slot(),
|
||||
ProofId: primitives.ExecutionProofId(i),
|
||||
ProofData: []byte{byte(i)},
|
||||
}
|
||||
service.cfg.ExecProofsPool.Insert(proof)
|
||||
|
||||
opfeed.Send(&feed.Event{
|
||||
Type: operation.ExecutionProofReceived,
|
||||
Data: &operation.ExecutionProofReceivedData{
|
||||
ExecutionProof: proof,
|
||||
},
|
||||
})
|
||||
}
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Test_postBlockProcess_EventSending tests that block processed events are only sent
|
||||
@@ -3262,7 +3220,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -66,52 +66,54 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
|
||||
|
||||
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
go func() {
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
}
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
}
|
||||
|
||||
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
|
||||
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot)
|
||||
}
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}()
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
|
||||
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
|
||||
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
|
||||
@@ -156,13 +158,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
}
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice")
|
||||
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
@@ -85,9 +87,11 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
ExecProofsPool execproofs.PoolManager
|
||||
P2P p2p.Accessor
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
OperationNotifier operation.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
AttService *attestations.Service
|
||||
StateGen *stategen.State
|
||||
@@ -211,7 +215,9 @@ func (s *Service) Start() {
|
||||
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
|
||||
go s.spawnProcessAttestationsRoutine()
|
||||
go s.spawnFinalizedProofsPruningRoutine()
|
||||
go s.runLateBlockTasks()
|
||||
}
|
||||
|
||||
@@ -567,3 +573,46 @@ func fuluForkSlot() (primitives.Slot, error) {
|
||||
|
||||
return forkFuluSlot, nil
|
||||
}
|
||||
|
||||
// spawnFinalizedProofsPruningRoutine prunes execution proofs pool on every epoch.
|
||||
// It removes proofs older than the finalized checkpoint to prevent unbounded
|
||||
// memory growth.
|
||||
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
|
||||
func (s *Service) spawnFinalizedProofsPruningRoutine() {
|
||||
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
defer ticker.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case slot := <-ticker.C():
|
||||
// Only prune at the start of each epoch
|
||||
if !slots.IsEpochStart(slot) {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedCheckpoint := s.FinalizedCheckpt()
|
||||
if finalizedCheckpoint == nil {
|
||||
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get finalized slot")
|
||||
continue
|
||||
}
|
||||
|
||||
// Prune proofs older than finalized slot
|
||||
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"prunedCount": count,
|
||||
"finalizedSlot": finalizedSlot,
|
||||
}).Debug("Pruned finalized execution proofs")
|
||||
}
|
||||
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if uint64(curEpoch) < e {
|
||||
continue
|
||||
}
|
||||
bal, err := st.PendingBalanceToWithdraw(srcIdx)
|
||||
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
|
||||
continue
|
||||
}
|
||||
if bal > 0 {
|
||||
if hasBal {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -46,6 +46,9 @@ const (
|
||||
|
||||
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
|
||||
DataColumnReceived = 12
|
||||
|
||||
// ExecutionProofReceived is sent after an execution proof object has been received from gossip or rpc.
|
||||
ExecutionProofReceived = 13
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -77,6 +80,11 @@ type BLSToExecutionChangeReceivedData struct {
|
||||
Change *ethpb.SignedBLSToExecutionChange
|
||||
}
|
||||
|
||||
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
|
||||
type ExecutionProofReceivedData struct {
|
||||
ExecutionProof *ethpb.ExecutionProof
|
||||
}
|
||||
|
||||
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
|
||||
type BlobSidecarReceivedData struct {
|
||||
Blob *blocks.VerifiedROBlob
|
||||
|
||||
@@ -152,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
log.WithError(err).Error("Could not update committee cache")
|
||||
}
|
||||
|
||||
return indices, nil
|
||||
|
||||
@@ -5,10 +5,20 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
|
||||
},
|
||||
var (
|
||||
dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
|
||||
},
|
||||
)
|
||||
|
||||
cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cells_and_proofs_from_structured_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute cells and proofs from structured computation.",
|
||||
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -3,6 +3,7 @@ package peerdas
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -296,32 +297,42 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
return nil, nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, nil, errors.New("wrong KZG proof size - should never happen")
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
cellsPerBlob = append(cellsPerBlob, cells)
|
||||
proofsPerBlob = append(proofsPerBlob, proofs)
|
||||
proofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = proofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
@@ -329,40 +340,55 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
|
||||
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
|
||||
for _, blobAndProof := range blobsAndProofs {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
|
||||
|
||||
for i, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, nil, ErrNilBlobAndProof
|
||||
}
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return nil, nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return nil, nil, errors.New("wrong KZG proof size - should never happen")
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
cellsPerBlob = append(cellsPerBlob, cells)
|
||||
proofsPerBlob = append(proofsPerBlob, kzgProofs)
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = kzgProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
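
Both functions above switch from appending results sequentially to fanning the per-blob work out with errgroup: the result slices are pre-sized and each goroutine writes only its own index, so no mutex is needed for the outputs and Wait() surfaces the first error. A minimal, self-contained sketch of that pattern follows (illustrative only; squareAll and its inputs are placeholders for the per-blob cell/proof computation):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// squareAll processes every input in its own goroutine and writes the result
// into a pre-sized, index-addressed slice, mirroring the structure used for
// cellsPerBlob/proofsPerBlob above.
func squareAll(inputs []int) ([]int, error) {
	results := make([]int, len(inputs)) // one slot per input, no append, no lock

	var eg errgroup.Group
	for i, v := range inputs {
		i, v := i, v // per-iteration copies (not strictly needed since Go 1.22)
		eg.Go(func() error {
			results[i] = v * v // this goroutine is the only writer of slot i
			return nil
		})
	}
	if err := eg.Wait(); err != nil { // first non-nil error, if any
		return nil, err
	}
	return results, nil
}

func main() {
	out, err := squareAll([]int{1, 2, 3, 4})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [1 4 9 16]
}
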
@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
sig := signed.Signature()
|
||||
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
|
||||
}
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
|
||||
if err != nil {
|
||||
@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
|
||||
// Merge beacon block, randao and attestations signatures into a set.
|
||||
set := bls.NewSet()
|
||||
set.Join(bSet).Join(rSet).Join(aSet)
|
||||
set.Join(rSet).Join(aSet)
|
||||
|
||||
if blk.Version() >= version.Capella {
|
||||
changes, err := signed.Block().Body().BLSToExecutionChanges()
|
||||
|
||||
@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "block signature", set.Descriptions[0])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[1])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[2])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[0])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[1])
|
||||
}
|
||||
|
||||
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
|
||||
|
||||
@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
|
||||
|
||||
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
|
||||
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
|
||||
|
||||
if oldestSlotFlagPtr != nil {
|
||||
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
|
||||
if oldestEpoch < sn.blockRetention {
|
||||
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
|
||||
sn.validOldestSlotPtr = oldestSlotFlagPtr
|
||||
} else {
|
||||
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
|
||||
|
||||
@@ -128,6 +128,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
|
||||
|
||||
currentSlot := primitives.Slot(10000)
|
||||
currentFunc := func() primitives.Slot { return currentSlot }
|
||||
@@ -141,6 +144,7 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedCol primitives.Epoch
|
||||
name string
|
||||
input SyncNeeds
|
||||
current func() primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "basic initialization with no flags",
|
||||
@@ -174,13 +178,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(10)
|
||||
return &slot
|
||||
}(),
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
|
||||
@@ -210,6 +214,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "both blob retention flag and oldest slot set",
|
||||
blobRetentionFlag: minBlobEpochs + 5,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(100)
|
||||
return &slot
|
||||
@@ -232,16 +239,27 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedBlob: 5000,
|
||||
expectedCol: 5000,
|
||||
},
|
||||
{
|
||||
name: "regression for deneb start",
|
||||
blobRetentionFlag: 8212500,
|
||||
expectValidOldest: true,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
expectedBlob: 8212500,
|
||||
expectedCol: 8212500,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
if tc.current == nil {
|
||||
tc.current = currentFunc
|
||||
}
|
||||
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that current, deneb, fulu are set correctly
|
||||
require.Equal(t, currentSlot, result.current())
|
||||
|
||||
// Check retention calculations
|
||||
require.Equal(t, tc.expectedBlob, result.blobRetention)
|
||||
require.Equal(t, tc.expectedCol, result.colRetention)
|
||||
|
||||
@@ -38,6 +38,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -185,73 +186,162 @@ func (dcs *DataColumnStorage) WarmCache() {
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open the data column filesystem file.
|
||||
f, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := f.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem.
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := listEpochFiles(dcs.fs, epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
epochHighest, err := dcs.processEpochFiles(files)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
|
||||
}
|
||||
|
||||
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
dcs.prune()
|
||||
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
totalElapsed := time.Since(start)
|
||||
|
||||
// Log summary
|
||||
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
|
||||
}
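
The rewritten warm-up above replaces the recursive afero.Walk with an explicit two-level read: it lists the top-level period directories, then the epoch directories inside each period, and finally hands every epoch's .sszs files to processEpochFiles, which reads them in parallel (one goroutine and one file descriptor per file). In other words, it assumes the on-disk layout that the rest of this file writes, roughly

	<period>/<epoch>/<data column file>.sszs

where each .sszs file holds the stored data column sidecars for a single block root (the placeholder names above are illustrative, not actual directory names).
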
// listEpochFiles lists all .sszs files in an epoch directory.
|
||||
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read epoch directory")
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
|
||||
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
for _, filePath := range files {
|
||||
eg.Go(func() error {
|
||||
epoch, err := dcs.processFile(filePath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return highestEpoch, err
|
||||
}
|
||||
|
||||
return highestEpoch, nil
|
||||
}
|
||||
|
||||
// processFile processes a single .sszs file.
|
||||
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "extract file metadata")
|
||||
}
|
||||
|
||||
// Open the file (each goroutine gets its own FD)
|
||||
f, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "open file")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read metadata
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "read metadata")
|
||||
}
|
||||
|
||||
// Extract indices
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return fileMetadata.epoch, nil // No indices, skip
|
||||
}
|
||||
|
||||
// Build ident and set in cache (thread-safe)
|
||||
dataColumnsIdent := DataColumnsIdent{
|
||||
Root: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
Indices: indices,
|
||||
}
|
||||
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return 0, errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return fileMetadata.epoch, nil
|
||||
}
|
||||
|
||||
// Summary returns the DataColumnStorageSummary.
|
||||
@@ -515,6 +605,11 @@ func (dcs *DataColumnStorage) Clear() error {
|
||||
|
||||
// prune cleans the cache, the filesystem, and the mutexes.
|
||||
func (dcs *DataColumnStorage) prune() {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}()
|
||||
|
||||
highestStoredEpoch := dcs.cache.HighestEpoch()
|
||||
|
||||
// Check if we need to prune.
|
||||
@@ -622,6 +717,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
// Create the SSZ encoded data column sidecars.
|
||||
var sszEncodedDataColumnSidecars []byte
|
||||
|
||||
// Initialize the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount := uint8(0)
|
||||
|
||||
for {
|
||||
dataColumnSidecars := pullChan(inputDataColumnSidecars)
|
||||
if len(dataColumnSidecars) == 0 {
|
||||
@@ -668,6 +766,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
return errors.Wrap(err, "set index")
|
||||
}
|
||||
|
||||
// Increment the count of the saved SSZ encoded data column sidecar.
|
||||
storedCount++
|
||||
|
||||
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
@@ -692,9 +793,12 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync")
|
||||
}
|
||||
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
dataColumnBatchStoreCount.Observe(float64(storedCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -808,10 +912,14 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
|
||||
return errWrongBytesWritten
|
||||
}
|
||||
|
||||
syncStart := time.Now()
|
||||
if err := file.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync")
|
||||
}
|
||||
|
||||
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
|
||||
dataColumnBatchStoreCount.Observe(float64(storedCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -36,16 +36,15 @@ var (
|
||||
})
|
||||
|
||||
// Data columns
|
||||
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
|
||||
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_save_latency",
|
||||
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
|
||||
Buckets: dataColumnBuckets,
|
||||
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
|
||||
})
|
||||
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "data_column_storage_get_latency",
|
||||
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
|
||||
Buckets: dataColumnBuckets,
|
||||
Buckets: []float64{3, 5, 7, 9, 11, 13},
|
||||
})
|
||||
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "data_column_pruned",
|
||||
@@ -59,4 +58,16 @@ var (
|
||||
Name: "data_column_disk_count",
|
||||
Help: "Approximate number of data columns in storage",
|
||||
})
|
||||
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_file_sync_latency",
|
||||
Help: "Latency of sync operations when saving data columns in milliseconds",
|
||||
})
|
||||
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_batch_store_count",
|
||||
Help: "Number of data columns stored in a batch",
|
||||
})
|
||||
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "data_column_prune_latency",
|
||||
Help: "Latency of data column prune operations in milliseconds",
|
||||
})
|
||||
)
|
||||
|
||||
@@ -532,12 +532,19 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
|
||||
defer span.End()
|
||||
|
||||
start := time.Now()
|
||||
|
||||
if !s.capabilityCache.has(GetBlobsV2) {
|
||||
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
|
||||
|
||||
if len(result) != 0 {
|
||||
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}
|
||||
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,13 @@ var (
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
getBlobsV2Latency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_blobs_v2_latency_milliseconds",
|
||||
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "execution_parse_error_count",
|
||||
Help: "The number of errors that occurred while parsing execution payload",
|
||||
|
||||
@@ -37,6 +37,7 @@ go_library(
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/node/registration"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -102,6 +103,7 @@ type BeaconNode struct {
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
execProofsPool execproofs.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
@@ -156,6 +158,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
slashingsPool: slashings.NewPool(),
|
||||
syncCommitteePool: synccommittee.NewPool(),
|
||||
blsToExecPool: blstoexec.NewPool(),
|
||||
execProofsPool: execproofs.NewPool(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
payloadIDCache: cache.NewPayloadIDCache(),
|
||||
slasherBlockHeadersFeed: new(event.Feed),
|
||||
@@ -737,6 +740,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithExitPool(b.exitPool),
|
||||
blockchain.WithSlashingPool(b.slashingsPool),
|
||||
blockchain.WithBLSToExecPool(b.blsToExecPool),
|
||||
blockchain.WithExecProofsPool(b.execProofsPool),
|
||||
blockchain.WithP2PBroadcaster(b.fetchP2P()),
|
||||
blockchain.WithStateNotifier(b),
|
||||
blockchain.WithAttestationService(attService),
|
||||
@@ -752,6 +756,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
blockchain.WithOperationNotifier(b),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -827,6 +832,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithSlashingPool(b.slashingsPool),
|
||||
regularsync.WithSyncCommsPool(b.syncCommitteePool),
|
||||
regularsync.WithBlsToExecPool(b.blsToExecPool),
|
||||
regularsync.WithExecProofPool(b.execProofsPool),
|
||||
regularsync.WithStateGen(b.stateGen),
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
|
||||
beacon-chain/operations/execproofs/BUILD.bazel
@@ -0,0 +1,16 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["pool.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs",
    visibility = ["//visibility:public"],
    deps = [
        "//config/fieldparams:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
    ],
)
beacon-chain/operations/execproofs/pool.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package execproofs

import (
    "fmt"
    "sync"

    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// ProofKey uniquely identifies an execution proof by block root and proof type.
type ProofKey struct {
    Root    [fieldparams.RootLength]byte
    ProofId primitives.ExecutionProofId
}

// String returns a string representation for logging.
func (k ProofKey) String() string {
    return fmt.Sprintf("root=%#x,proofId=%d", k.Root, k.ProofId)
}

var (
    execProofInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
        Name: "exec_proof_pool_total",
        Help: "The number of execution proofs in the operation pool.",
    })
)

var _ PoolManager = (*ExecProofPool)(nil)

// PoolManager maintains execution proofs received via gossip.
// These proofs are used for data availability checks when importing blocks.
// Lightweight verifier nodes need a minimum number of proofs from different zkVM types
// to verify block execution correctness.
type PoolManager interface {
    // Insert inserts a proof into the pool.
    // If a proof with the same block root and proof ID already exists, it is not added again.
    Insert(executionProof *ethpb.ExecutionProof)

    // Get returns a copy of all proofs for a specific block root.
    Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof

    // Ids returns the list of (unique) proof types available for a specific block root.
    Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId

    // Count counts the number of proofs for a specific block root.
    Count(blockRoot [fieldparams.RootLength]byte) uint64

    // Exists checks if a proof exists for the given block root and proof ID.
    Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool

    // PruneUpTo removes proofs older than the target slot.
    PruneUpTo(targetSlot primitives.Slot) int
}

// ExecProofPool is a concrete implementation of PoolManager.
type ExecProofPool struct {
    lock sync.RWMutex
    m    map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof
}

// NewPool returns an initialized pool.
func NewPool() *ExecProofPool {
    return &ExecProofPool{
        m: make(map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof),
    }
}

// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
func (p *ExecProofPool) Insert(proof *ethpb.ExecutionProof) {
    p.lock.Lock()
    defer p.lock.Unlock()

    blockRoot := bytesutil.ToBytes32(proof.BlockRoot)

    // Create the inner map if it doesn't exist.
    if p.m[blockRoot] == nil {
        p.m[blockRoot] = make(map[primitives.ExecutionProofId]*ethpb.ExecutionProof)
    }

    // Check if the proof already exists.
    if _, exists := p.m[blockRoot][proof.ProofId]; exists {
        return
    }

    // Insert the new proof.
    p.m[blockRoot][proof.ProofId] = proof
    execProofInPoolTotal.Inc()
}

// Get returns a copy of all proofs for a specific block root.
func (p *ExecProofPool) Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof {
    p.lock.RLock()
    defer p.lock.RUnlock()

    proofsByType, exists := p.m[blockRoot]
    if !exists {
        return nil
    }

    result := make([]*ethpb.ExecutionProof, 0, len(proofsByType))
    for _, proof := range proofsByType {
        result = append(result, proof.Copy())
    }
    return result
}

// Ids returns the list of (unique) proof types available for a specific block root.
func (p *ExecProofPool) Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId {
    p.lock.RLock()
    defer p.lock.RUnlock()

    proofById, exists := p.m[blockRoot]
    if !exists {
        return nil
    }

    ids := make([]primitives.ExecutionProofId, 0, len(proofById))
    for id := range proofById {
        ids = append(ids, id)
    }

    return ids
}

// Count counts the number of proofs for a specific block root.
func (p *ExecProofPool) Count(blockRoot [fieldparams.RootLength]byte) uint64 {
    p.lock.RLock()
    defer p.lock.RUnlock()

    return uint64(len(p.m[blockRoot]))
}

// Exists checks if a proof exists for the given block root and proof ID.
func (p *ExecProofPool) Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool {
    p.lock.RLock()
    defer p.lock.RUnlock()

    proofsByType, exists := p.m[blockRoot]
    if !exists {
        return false
    }

    _, exists = proofsByType[proofId]
    return exists
}

// PruneUpTo removes proofs older than the given slot.
func (p *ExecProofPool) PruneUpTo(targetSlot primitives.Slot) int {
    p.lock.Lock()
    defer p.lock.Unlock()

    pruned := 0
    for blockRoot, proofsByType := range p.m {
        for proofId, proof := range proofsByType {
            if proof.Slot < targetSlot {
                delete(proofsByType, proofId)
                execProofInPoolTotal.Dec()
                pruned++
            }
        }

        // Clean up empty inner maps.
        if len(proofsByType) == 0 {
            delete(p.m, blockRoot)
        }
    }

    return pruned
}
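As a quick orientation, a minimal usage sketch of the pool above. It only exercises the PoolManager API defined in pool.go and assumes an *ethpb.ExecutionProof exposing the BlockRoot, ProofId and Slot fields that the pool itself reads; the main function and the threshold comparison are illustrative, not part of the change.

package main // illustrative sketch, not part of this diff

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
    pool := execproofs.NewPool()

    // A proof as it would arrive from the execution_proof gossip topic.
    proof := &ethpb.ExecutionProof{
        BlockRoot: make([]byte, 32),
        ProofId:   primitives.ExecutionProofId(1),
        Slot:      primitives.Slot(10),
    }
    pool.Insert(proof)
    pool.Insert(proof) // same (block root, proof ID): silently ignored

    root := bytesutil.ToBytes32(proof.BlockRoot)
    fmt.Println(pool.Count(root), pool.Ids(root), pool.Exists(root, proof.ProofId))

    // A lightweight verifier would compare Count(root) against the configured
    // MIN_PROOFS_REQUIRED value (see the spec test further down) before treating
    // the block's execution as verified.

    // Drop everything older than slot 11; prints 1 and the pool is empty again.
    fmt.Println(pool.PruneUpTo(primitives.Slot(11)))
}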
@@ -165,6 +165,7 @@ go_test(
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state/stategen/mock:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -589,6 +589,11 @@ func (s *Service) createLocalNode(
        localNode.Set(quicEntry)
    }

    if features.Get().EnableZkvm {
        zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
        localNode.Set(zkvmKeyEntry)
    }

    localNode.SetFallbackIP(ipAddr)
    localNode.SetFallbackUDP(udpPort)
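The zkVM capability is advertised as a plain boolean ENR entry and read back by remote peers (see ZkvmEnabledPeers further down). A minimal round-trip with go-ethereum's enr package; "zkvm_enabled" is a placeholder key name, the real key comes from params.BeaconNetworkConfig().ZkvmEnabledKey.

package main // illustrative round-trip, not part of this diff

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enr"
)

func main() {
    // Writing side: what createLocalNode does when the EnableZkvm flag is on.
    record := new(enr.Record)
    record.Set(enr.WithEntry("zkvm_enabled", true))

    // Reading side: load the same key into a bool; a missing key yields an error,
    // which is why ZkvmEnabledPeers simply skips peers whose Load fails.
    var enabled bool
    if err := record.Load(enr.WithEntry("zkvm_enabled", &enabled)); err != nil {
        fmt.Println("entry not found:", err)
        return
    }
    fmt.Println("zkvm enabled:", enabled)
}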
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
@@ -243,12 +244,19 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
name string
|
||||
cfg *Config
|
||||
expectedError bool
|
||||
zkvmEnabled bool
|
||||
}{
|
||||
{
|
||||
name: "valid config",
|
||||
cfg: &Config{},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "valid config with zkVM enabled",
|
||||
cfg: &Config{},
|
||||
expectedError: false,
|
||||
zkvmEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "invalid host address",
|
||||
cfg: &Config{HostAddress: "invalid"},
|
||||
@@ -273,6 +281,15 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.zkvmEnabled {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
resetCfg()
|
||||
})
|
||||
}
|
||||
|
||||
// Define ports. Use unique ports since this test validates ENR content.
|
||||
const (
|
||||
udpPort = 3100
|
||||
@@ -348,6 +365,14 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
custodyGroupCount := new(uint64)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
|
||||
require.Equal(t, custodyRequirement, *custodyGroupCount)
|
||||
|
||||
// Check zkVM enabled key if applicable.
|
||||
if tt.zkvmEnabled {
|
||||
zkvmEnabled := new(bool)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
|
||||
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,6 +52,9 @@ const (
    // lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
    // our light client finality update topic.
    lightClientFinalityUpdateWeight = 0.05
    // executionProofWeight specifies the scoring weight that we apply to
    // our execution proof topic.
    executionProofWeight = 0.05

    // maxInMeshScore describes the max score a peer can attain from being in the mesh.
    maxInMeshScore = 10
@@ -145,6 +148,8 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
        return defaultLightClientOptimisticUpdateTopicParams(), nil
    case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
        return defaultLightClientFinalityUpdateTopicParams(), nil
    case strings.Contains(topic, GossipExecutionProofMessage):
        return defaultExecutionProofTopicParams(), nil
    default:
        return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
    }
@@ -510,6 +515,28 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
    }
}

func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
    return &pubsub.TopicScoreParams{
        TopicWeight:                     executionProofWeight,
        TimeInMeshWeight:                maxInMeshScore / inMeshCap(),
        TimeInMeshQuantum:               inMeshTime(),
        TimeInMeshCap:                   inMeshCap(),
        FirstMessageDeliveriesWeight:    2,
        FirstMessageDeliveriesDecay:     scoreDecay(oneHundredEpochs),
        FirstMessageDeliveriesCap:       5,
        MeshMessageDeliveriesWeight:     0,
        MeshMessageDeliveriesDecay:      0,
        MeshMessageDeliveriesCap:        0,
        MeshMessageDeliveriesThreshold:  0,
        MeshMessageDeliveriesWindow:     0,
        MeshMessageDeliveriesActivation: 0,
        MeshFailurePenaltyWeight:        0,
        MeshFailurePenaltyDecay:         0,
        InvalidMessageDeliveriesWeight:  -2000,
        InvalidMessageDeliveriesDecay:   scoreDecay(invalidDecayPeriod),
    }
}

func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
    return &pubsub.TopicScoreParams{
        TopicWeight: lightClientOptimisticUpdateWeight,
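As a sanity check on the numbers above: under the standard gossipsub v1.1 scoring function the topic contribution is TopicWeight times the weighted sum of the per-topic counters, so the most a peer can earn here from first-message deliveries is 0.05 * 2 * 5 = 0.5 points, while a single invalid message already costs 0.05 * (-2000) * 1^2 = -100. The sketch below is just that arithmetic with the generic formula, not code from this diff.

package main // back-of-the-envelope check, not part of this diff

import "fmt"

func main() {
    const (
        topicWeight                    = 0.05
        firstMessageDeliveriesWeight   = 2.0
        firstMessageDeliveriesCap      = 5.0
        invalidMessageDeliveriesWeight = -2000.0
    )

    // Max positive score from first-message deliveries (the counter is capped).
    maxFirstDelivery := topicWeight * firstMessageDeliveriesWeight * firstMessageDeliveriesCap

    // Penalty after n invalid messages (gossipsub squares the counter).
    invalidPenalty := func(n float64) float64 {
        return topicWeight * invalidMessageDeliveriesWeight * n * n
    }

    fmt.Println(maxFirstDelivery)  // 0.5
    fmt.Println(invalidPenalty(1)) // -100
}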
@@ -25,6 +25,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
    LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
    LightClientFinalityUpdateTopicFormat:   func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
    DataColumnSubnetTopicFormat:            func() proto.Message { return &ethpb.DataColumnSidecar{} },
    ExecutionProofSubnetTopicFormat:        func() proto.Message { return &ethpb.ExecutionProof{} },
}

// GossipTopicMappings is a function to return the assigned data type
@@ -602,6 +602,33 @@ func (p *Status) All() []peer.ID {
    return pids
}

// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
    p.store.RLock()
    defer p.store.RUnlock()

    peers := make([]peer.ID, 0)
    for pid, peerData := range p.store.Peers() {
        if peerData.ConnState != Connected {
            continue
        }
        if peerData.Enr == nil {
            continue
        }

        var enabled bool
        entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
        if err := peerData.Enr.Load(entry); err != nil {
            continue
        }

        if enabled {
            peers = append(peers, pid)
        }
    }
    return peers
}

// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
    p.store.Lock()
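A short sketch of how a caller might consume the helper above, e.g. to pick one zkVM-capable peer to direct an execution-proof request at. The request itself is out of scope here; the interface and helper name are illustrative, only the ZkvmEnabledPeers method comes from the diff.

package zkvmselect // illustrative helper, not part of this diff

import (
    "math/rand"

    "github.com/libp2p/go-libp2p/core/peer"
)

// zkvmPeerSource is the slice of the Status API this helper needs.
type zkvmPeerSource interface {
    ZkvmEnabledPeers() []peer.ID
}

// pickZkvmPeer returns a random zkVM-capable peer, or false if none is connected.
// Random selection keeps request load spread across capable peers.
func pickZkvmPeer(status zkvmPeerSource) (peer.ID, bool) {
    candidates := status.ZkvmEnabledPeers()
    if len(candidates) == 0 {
        return "", false
    }
    return candidates[rand.Intn(len(candidates))], true
}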
@@ -1341,3 +1341,75 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
|
||||
p.SetConnectionState(id, state)
|
||||
return id
|
||||
}
|
||||
|
||||
func TestZkvmEnabledPeers(t *testing.T) {
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Create peer 1: Connected, zkVM enabled
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
record1 := new(enr.Record)
|
||||
zkvmEnabled := true
|
||||
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
|
||||
p.Add(record1, pid1, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid1, peers.Connected)
|
||||
|
||||
// Create peer 2: Connected, zkVM disabled
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
record2 := new(enr.Record)
|
||||
zkvmDisabled := false
|
||||
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
|
||||
p.Add(record2, pid2, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid2, peers.Connected)
|
||||
|
||||
// Create peer 3: Connected, zkVM enabled
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
record3 := new(enr.Record)
|
||||
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
|
||||
p.Add(record3, pid3, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid3, peers.Connected)
|
||||
|
||||
// Create peer 4: Disconnected, zkVM enabled (should not be included)
|
||||
pid4 := addPeer(t, p, peers.Disconnected)
|
||||
record4 := new(enr.Record)
|
||||
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
|
||||
p.Add(record4, pid4, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid4, peers.Disconnected)
|
||||
|
||||
// Create peer 5: Connected, no ENR (should not be included)
|
||||
pid5 := addPeer(t, p, peers.Connected)
|
||||
p.Add(nil, pid5, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid5, peers.Connected)
|
||||
|
||||
// Create peer 6: Connected, no zkVM key in ENR (should not be included)
|
||||
pid6 := addPeer(t, p, peers.Connected)
|
||||
record6 := new(enr.Record)
|
||||
record6.Set(enr.WithEntry("other_key", "other_value"))
|
||||
p.Add(record6, pid6, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid6, peers.Connected)
|
||||
|
||||
// Get zkVM enabled peers
|
||||
zkvmPeers := p.ZkvmEnabledPeers()
|
||||
|
||||
// Should return only pid1 and pid3 (connected peers with zkVM enabled)
|
||||
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
|
||||
|
||||
// Verify the returned peers are correct
|
||||
zkvmPeerMap := make(map[peer.ID]bool)
|
||||
for _, pid := range zkvmPeers {
|
||||
zkvmPeerMap[pid] = true
|
||||
}
|
||||
|
||||
assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
|
||||
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
|
||||
}
|
||||
|
||||
@@ -67,6 +67,9 @@ const (

    // DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
    DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"

    // ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
    ExecutionProofsByRootName = "/execution_proofs_by_root"
)

const (
@@ -106,6 +109,9 @@ const (
    // RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
    // /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
    RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
    // RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
    // /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
    RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1

    // V2 RPC Topics
    // RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -170,6 +176,9 @@ var (

    // DataColumnSidecarsByRoot v1 Message
    RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},

    // ExecutionProofsByRoot v1 Message
    RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}

// Maps all registered protocol prefixes.
@@ -193,6 +202,7 @@ var (
    LightClientOptimisticUpdateName: true,
    DataColumnSidecarsByRootName:    true,
    DataColumnSidecarsByRangeName:   true,
    ExecutionProofsByRootName:       true,
}

// Maps all the RPC messages which are to updated in altair.
@@ -36,6 +36,7 @@ var (
    attSubnetEnrKey         = params.BeaconNetworkConfig().AttSubnetKey
    syncCommsSubnetEnrKey   = params.BeaconNetworkConfig().SyncCommsSubnetKey
    custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
    zkvmEnabledKeyEnrKey    = params.BeaconNetworkConfig().ZkvmEnabledKey
)

// The value used with the subnet, in order
@@ -46,6 +46,8 @@ const (
    GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
    // GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
    GossipDataColumnSidecarMessage = "data_column_sidecar"
    // GossipExecutionProofMessage is the name for the execution proof message type.
    GossipExecutionProofMessage = "execution_proof"

    // Topic Formats
    //
@@ -75,6 +77,8 @@ const (
    LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
    // DataColumnSubnetTopicFormat is the topic format for the data column subnet.
    DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
    // ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
    ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (PoC only have one global topic)
)

// topic is a struct representing a single gossipsub topic.
@@ -158,6 +162,7 @@ func (s *Service) allTopics() []topic {
        newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
        newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
        newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
        newTopic(fulu, future, empty, GossipExecutionProofMessage),
    }
    last := params.GetNetworkScheduleEntry(genesis)
    schedule := []params.NetworkScheduleEntry{last}
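For reference, the wire-level topic name is produced by formatting the fork digest into the format string, with the gossip encoder appending its own suffix. A minimal sketch, assuming the usual "/eth2/%x/" value behind GossipProtocolAndDigest and an ssz_snappy encoder suffix (both are assumptions, not shown in this diff).

package main // illustrative, not part of this diff

import "fmt"

func main() {
    // Assumed prefix and suffix; the message name matches GossipExecutionProofMessage above.
    const executionProofSubnetTopicFormat = "/eth2/%x/" + "execution_proof"

    forkDigest := [4]byte{0x01, 0x02, 0x03, 0x04} // placeholder digest
    topic := fmt.Sprintf(executionProofSubnetTopicFormat, forkDigest) + "/ssz_snappy"
    fmt.Println(topic) // /eth2/01020304/execution_proof/ssz_snappy
}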
@@ -204,6 +204,9 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our light client finality update map.
|
||||
@@ -223,5 +226,8 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
|
||||
@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
}
|
||||
}
|
||||
}
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash block")
|
||||
}
|
||||
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not verify block signature")
|
||||
}
|
||||
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
|
||||
@@ -130,6 +130,10 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
@@ -238,22 +242,14 @@ func (s *Server) handleAttestationsElectra(
|
||||
},
|
||||
})
|
||||
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
|
||||
}
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
wantedEpoch := slots.ToEpoch(att.Data.Slot)
|
||||
// Broadcast first using CommitteeId directly (fast path)
|
||||
// This matches gRPC behavior and avoids blocking on state fetching
|
||||
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
|
||||
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get head validator indices")
|
||||
}
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
|
||||
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
|
||||
Index: i,
|
||||
@@ -264,17 +260,35 @@ func (s *Server) handleAttestationsElectra(
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
// Save to pool after broadcast (slow path - requires state fetching)
|
||||
// Run in goroutine to avoid blocking the HTTP response
|
||||
go func() {
|
||||
for _, singleAtt := range validAttestations {
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get target state for attestation")
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get committee for attestation")
|
||||
continue
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if len(failedBroadcasts) > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -470,6 +484,10 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
var req structs.SubmitSyncCommitteeSignaturesRequest
|
||||
err := json.NewDecoder(r.Body).Decode(&req.Data)
|
||||
switch {
|
||||
@@ -711,6 +729,7 @@ func (s *Server) SubmitAttesterSlashingsV2(w http.ResponseWriter, r *http.Reques
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
v, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -622,6 +623,8 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
AttestationStateFetcher: chainService,
|
||||
}
|
||||
@@ -654,6 +657,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -673,6 +677,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("phase0 att post electra", func(t *testing.T) {
|
||||
@@ -793,6 +798,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -812,6 +818,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
@@ -861,6 +868,27 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
|
||||
})
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleAtt)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttestationsV2(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListVoluntaryExits(t *testing.T) {
|
||||
@@ -1057,14 +1085,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
|
||||
t.Run("single", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1089,14 +1122,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1120,13 +1158,18 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1149,7 +1192,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
|
||||
})
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
s := &Server{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[]")
|
||||
@@ -1166,7 +1215,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
s := &Server{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -1179,6 +1234,26 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleSyncCommitteeMsg)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitSyncCommitteeSignatures(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListBLSToExecutionChanges(t *testing.T) {
|
||||
@@ -2112,6 +2187,33 @@ func TestSubmitAttesterSlashingsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid attester slashing", e.Message)
|
||||
})
|
||||
|
||||
t.Run("missing-version-header", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidAttesterSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
// Intentionally do not set api.VersionHeader to verify missing header handling.
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, api.VersionHeader+" header is required", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
|
||||
@@ -654,6 +654,10 @@ func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (s
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func (m *futureSyncMockFetcher) StateByEpoch(context.Context, primitives.Epoch) (state.BeaconState, error) {
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func TestGetSyncCommittees_Future(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -40,6 +40,7 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
|
||||
Data: data,
|
||||
})
|
||||
return
|
||||
}
|
||||
previous := schedule[0]
|
||||
for _, entry := range schedule {
|
||||
|
||||
@@ -169,6 +169,11 @@ func TestGetSpec(t *testing.T) {
|
||||
config.BlobsidecarSubnetCountElectra = 102
|
||||
config.SyncMessageDueBPS = 103
|
||||
|
||||
// EIP-8025
|
||||
config.MaxProofDataBytes = 200
|
||||
config.MinEpochsForExecutionProofRequests = 201
|
||||
config.MinProofsRequired = 202
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
config.DomainBeaconProposer = dbp
|
||||
@@ -205,7 +210,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 175, len(data))
|
||||
assert.Equal(t, 178, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -577,6 +582,12 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "103", v)
|
||||
case "MAX_PROOF_DATA_BYTES":
|
||||
assert.Equal(t, "200", v)
|
||||
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
|
||||
assert.Equal(t, "201", v)
|
||||
case "MIN_PROOFS_REQUIRED":
|
||||
assert.Equal(t, "202", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
blobSchedule, ok := v.([]any)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
@@ -116,6 +116,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateSlot := update.AttestedHeader().Beacon().Slot
|
||||
@@ -131,12 +132,15 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
chunkLength = ssz.MarshalUint64(chunkLength, uint64(len(updateSSZ)+4))
|
||||
if _, err := w.Write(chunkLength); err != nil {
|
||||
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
|
||||
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateSSZ); err != nil {
|
||||
httputil.HandleError(w, "Could not write update SSZ: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -145,6 +149,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateJson, err := structs.LightClientUpdateFromConsensus(update)
|
||||
|
||||
@@ -132,6 +132,7 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.SyncChecker.Synced() && !optimistic {
|
||||
return
|
||||
|
||||
@@ -228,7 +228,7 @@ func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.
|
||||
}
|
||||
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state for epoch's starting slot: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return nil, false
|
||||
}
|
||||
return st, true
|
||||
|
||||
@@ -19,7 +19,6 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
@@ -78,6 +77,7 @@ go_test(
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
rpchelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
@@ -898,20 +897,15 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
var startSlot primitives.Slot
|
||||
// For next epoch requests, we use the current epoch's state since committee
|
||||
// assignments for next epoch can be computed from current epoch's state.
|
||||
epochForState := requestedEpoch
|
||||
if requestedEpoch == nextEpoch {
|
||||
startSlot, err = slots.EpochStart(currentEpoch)
|
||||
} else {
|
||||
startSlot, err = slots.EpochStart(requestedEpoch)
|
||||
epochForState = currentEpoch
|
||||
}
|
||||
st, err := s.Stater.StateByEpoch(ctx, epochForState)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot from epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1020,39 +1014,11 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
nextEpochLookahead = true
|
||||
}
|
||||
|
||||
epochStartSlot, err := slots.EpochStart(requestedEpoch)
|
||||
st, err := s.Stater.StateByEpoch(ctx, requestedEpoch)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot of epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
var st state.BeaconState
|
||||
// if the requested epoch is new, use the head state and the next slot cache
|
||||
if requestedEpoch < currentEpoch {
|
||||
st, err = s.Stater.StateBySlot(ctx, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get state for slot %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
st, err = s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
|
||||
if st.Slot() < epochStartSlot {
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not process slots up to %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var assignments map[primitives.ValidatorIndex][]primitives.Slot
|
||||
if nextEpochLookahead {
|
||||
@@ -1103,7 +1069,8 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !sortProposerDuties(w, duties) {
|
||||
if err = sortProposerDuties(duties); err != nil {
|
||||
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1174,14 +1141,10 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
startingEpoch := min(requestedEpoch, currentEpoch)
|
||||
slot, err := slots.EpochStart(startingEpoch)
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, startingEpoch)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err := s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee state: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1327,7 +1290,7 @@ func (s *Server) GetLiveness(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
st, err = s.Stater.StateBySlot(ctx, epochEnd)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get slot for requested epoch: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
participation, err = st.CurrentEpochParticipation()
|
||||
@@ -1447,22 +1410,20 @@ func syncCommitteeDutiesAndVals(
|
||||
return duties, vals, nil
|
||||
}
|
||||
|
||||
func sortProposerDuties(w http.ResponseWriter, duties []*structs.ProposerDuty) bool {
|
||||
ok := true
|
||||
func sortProposerDuties(duties []*structs.ProposerDuty) error {
|
||||
var err error
|
||||
sort.Slice(duties, func(i, j int) bool {
|
||||
si, err := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
si, parseErr := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
return false
|
||||
}
|
||||
sj, err := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
sj, parseErr := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
return false
|
||||
}
|
||||
return si < sj
|
||||
})
|
||||
return ok
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
p2pmock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -2006,6 +2007,7 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -2184,6 +2186,7 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
}
|
||||
@@ -2224,6 +2227,62 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProposerDuties(t *testing.T) {
|
||||
@@ -2427,6 +2486,60 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
@@ -2457,7 +2570,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
|
||||
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, State: st}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2648,7 +2761,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
return newSyncPeriodSt
|
||||
}
|
||||
}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot, State: newSyncPeriodSt}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2729,8 +2842,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
slot, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
st2, err := util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
st2 := st.Copy()
|
||||
require.NoError(t, st2.SetSlot(slot))
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
@@ -2744,7 +2856,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
State: st2,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
Stater: &testutil.MockStater{BeaconState: st2},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -2789,6 +2901,62 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -98,6 +99,7 @@ type Stater interface {
|
||||
State(ctx context.Context, id []byte) (state.BeaconState, error)
|
||||
StateRoot(ctx context.Context, id []byte) ([]byte, error)
|
||||
StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
|
||||
StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error)
|
||||
}
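Any Stater that already implements StateBySlot can satisfy the new StateByEpoch requirement by resolving the epoch's start slot first. A minimal sketch, assuming only the interface above and slots.EpochStart (the epochStater wrapper is hypothetical):

// epochStater derives StateByEpoch from an existing StateBySlot implementation.
type epochStater struct {
    inner Stater
}

// StateByEpoch treats "the state for an epoch" as the state at the epoch's start slot.
func (e epochStater) StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error) {
    startSlot, err := slots.EpochStart(epoch)
    if err != nil {
        return nil, errors.Wrap(err, "could not get epoch start slot")
    }
    return e.inner.StateBySlot(ctx, startSlot)
}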
|
||||
|
||||
// BeaconDbStater is an implementation of Stater. It retrieves states from the beacon chain database.
|
||||
@@ -267,6 +269,46 @@ func (p *BeaconDbStater) StateBySlot(ctx context.Context, target primitives.Slot
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// StateByEpoch returns the state for the start of the requested epoch.
|
||||
// For current or next epoch, it uses the head state and next slot cache for efficiency.
|
||||
// For past epochs, it replays blocks from the most recent canonical state.
|
||||
func (p *BeaconDbStater) StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "statefetcher.StateByEpoch")
|
||||
defer span.End()
|
||||
|
||||
targetSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get epoch start slot")
|
||||
}
|
||||
|
||||
currentSlot := p.GenesisTimeFetcher.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// For past epochs, use the replay mechanism
|
||||
if epoch < currentEpoch {
|
||||
return p.StateBySlot(ctx, targetSlot)
|
||||
}
|
||||
|
||||
// For current or next epoch, use head state + next slot cache (much faster)
|
||||
headState, err := p.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
|
||||
// If head state is already at or past the target slot, return it
|
||||
if headState.Slot() >= targetSlot {
|
||||
return headState, nil
|
||||
}
|
||||
|
||||
// Process slots using the next slot cache
|
||||
headRoot := p.ChainInfoFetcher.CachedHeadRoot()
|
||||
st, err := transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot[:], targetSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", targetSlot)
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (p *BeaconDbStater) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -444,3 +444,111 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(101), st.Slot())
|
||||
}
|
||||
|
||||
func TestStateByEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
t.Run("current epoch uses head state", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 0
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
// Should return head state since it's already past epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("current epoch processes slots to epoch start", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 1
|
||||
// Current slot is 32 (epoch 1), so epoch 1 is current epoch
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := slotsPerEpoch // slot 32, epoch 1
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
// In real usage, the transition package handles this properly
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
// that can process slots (missing committees, etc.)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("past epoch uses replay", func(t *testing.T) {
|
||||
// Head is at epoch 2, requesting epoch 0 (past)
|
||||
headSlot := slotsPerEpoch * 2 // slot 64, epoch 2
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
pastEpochSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 0})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
mockReplayer := mockstategen.NewReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(pastEpochSt, 0)
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), st.Slot())
|
||||
})
|
||||
|
||||
t.Run("next epoch uses head state path", func(t *testing.T) {
|
||||
// Head is at slot 30 (epoch 0), requesting epoch 1 (next)
|
||||
// Current slot is 30 (epoch 0), so epoch 1 is next epoch
|
||||
headSlot := primitives.Slot(30)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("head state already at target slot returns immediately", func(t *testing.T) {
|
||||
// Head is at slot 32 (epoch 1 start), requesting epoch 1
|
||||
headSlot := slotsPerEpoch // slot 32
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("head state past target slot returns head state", func(t *testing.T) {
|
||||
// Head is at slot 40, requesting epoch 1 (starts at slot 32)
|
||||
headSlot := primitives.Slot(40)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
// Returns head state since it's already >= epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -52,24 +52,27 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
attCopy := att.Copy()
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return resp, nil
|
||||
}
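Because the removed and added branches are rendered side by side above, the end state of this hunk is easy to misread: the feature-flag check now runs inside the goroutine, so the response is returned before the attestation is persisted on either path. A simplified sketch of the resulting save path (the helper name is illustrative; the fields and calls are the ones used in this function):

// saveAttestationAsync mirrors the post-change behavior: choose the
// experimental cache or the legacy pool inside a background goroutine.
func (vs *Server) saveAttestationAsync(att *ethpb.Attestation) {
    go func() {
        if features.Get().EnableExperimentalAttestationPool {
            if err := vs.AttestationCache.Add(att); err != nil {
                log.WithError(err).Error("Could not save attestation")
            }
            return
        }
        // The legacy pool path copies the attestation before saving it.
        attCopy := att.Copy()
        if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
            log.WithError(err).Error("Could not save unaggregated attestation")
        }
    }()
}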
|
||||
@@ -82,6 +85,10 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -98,18 +105,17 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
|
||||
singleAttCopy := singleAtt.Copy()
|
||||
att := singleAttCopy.ToAttestationElectra(committee)
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ func TestProposeAttestation(t *testing.T) {
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
TimeFetcher: chainService,
|
||||
AttestationStateFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
head := util.NewBeaconBlock()
|
||||
head.Block.Slot = 999
|
||||
@@ -141,6 +142,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
@@ -149,6 +151,37 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
func TestProposeAttestation_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
_, err := attesterServer.ProposeAttestation(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestProposeAttestationElectra_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := ðpb.SingleAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestGetAttestationData_OK(t *testing.T) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
|
||||
|
||||
@@ -26,5 +26,6 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// MockStater is a fake implementation of lookup.Stater.
|
||||
@@ -14,6 +15,7 @@ type MockStater struct {
|
||||
StateProviderFunc func(ctx context.Context, stateId []byte) (state.BeaconState, error)
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByEpoch map[primitives.Epoch]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
@@ -43,3 +45,22 @@ func (m *MockStater) StateRoot(context.Context, []byte) ([]byte, error) {
|
||||
func (m *MockStater) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
|
||||
return m.StatesBySlot[s], nil
|
||||
}
|
||||
|
||||
// StateByEpoch --
|
||||
func (m *MockStater) StateByEpoch(_ context.Context, e primitives.Epoch) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StatesByEpoch != nil {
|
||||
return m.StatesByEpoch[e], nil
|
||||
}
|
||||
// Fall back to StatesBySlot if StatesByEpoch is not set
|
||||
slot, err := slots.EpochStart(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.StatesBySlot != nil {
|
||||
return m.StatesBySlot[slot], nil
|
||||
}
|
||||
return m.BeaconState, nil
|
||||
}
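The lookup order above is CustomError first, then StatesByEpoch, then the StatesBySlot fallback keyed by the epoch's start slot, and finally BeaconState. A short illustrative test of that order (fixture helpers are the ones used elsewhere in these tests; this test is a sketch, not part of the diff):

func TestMockStaterStateByEpochFallback(t *testing.T) {
    st, err := util.NewBeaconState()
    require.NoError(t, err)

    // With only StatesBySlot populated, epoch 1 resolves via its start slot.
    startSlot, err := slots.EpochStart(1)
    require.NoError(t, err)
    m := &MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{startSlot: st}}

    got, err := m.StateByEpoch(t.Context(), 1)
    require.NoError(t, err)
    require.Equal(t, st, got)

    // A configured CustomError always wins.
    m.CustomError = errors.New("boom")
    _, err = m.StateByEpoch(t.Context(), 1)
    require.ErrorContains(t, "boom", err)
}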
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"exec_proofs.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"log.go",
|
||||
@@ -31,6 +32,7 @@ go_library(
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_execution_proofs_by_root_topic.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_light_client.go",
|
||||
"rpc_metadata.go",
|
||||
@@ -46,6 +48,7 @@ go_library(
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_execution_proofs.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
@@ -57,6 +60,7 @@ go_library(
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
"validate_data_column.go",
|
||||
"validate_execution_proof.go",
|
||||
"validate_light_client.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
@@ -93,6 +97,7 @@ go_library(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
@@ -187,6 +192,7 @@ go_test(
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_data_column_sidecars_by_range_test.go",
|
||||
"rpc_data_column_sidecars_by_root_test.go",
|
||||
"rpc_execution_proofs_by_root_topic_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
"rpc_light_client_test.go",
|
||||
@@ -211,6 +217,7 @@ go_test(
|
||||
"validate_blob_test.go",
|
||||
"validate_bls_to_execution_change_test.go",
|
||||
"validate_data_column_test.go",
|
||||
"validate_execution_proof_test.go",
|
||||
"validate_light_client_test.go",
|
||||
"validate_proposer_slashing_test.go",
|
||||
"validate_sync_committee_message_test.go",
|
||||
@@ -244,6 +251,7 @@ go_test(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/slashings/mock:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
|
||||
@@ -161,13 +161,17 @@ func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error)
|
||||
resChan := make(chan error, 1)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
s.kzgChan <- verificationSet
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case s.kzgChan <- verificationSet:
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||
|
||||
@@ -3,6 +3,7 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -243,8 +244,10 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
var lastRoot [fieldparams.RootLength]byte
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
lastRoot = root
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
@@ -259,6 +262,7 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingRootCount := len(missingIndicesByRoot)
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
@@ -301,11 +305,19 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingRootCount": initialMissingRootCount,
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingRootCount": len(missingIndicesByRoot),
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
})
|
||||
|
||||
if initialMissingRootCount == 1 {
|
||||
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
|
||||
}
|
||||
|
||||
log.Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
beacon-chain/sync/exec_proofs.go (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// generateExecProof returns a dummy execution proof after the specified delay.
|
||||
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
|
||||
// Simulate proof generation work
|
||||
time.Sleep(delay)
|
||||
|
||||
// Create a dummy proof with some deterministic data
|
||||
block := roBlock.Block()
|
||||
if block == nil {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return nil, errors.New("nil block body")
|
||||
}
|
||||
|
||||
executionData, err := body.Execution()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("execution: %w", err)
|
||||
}
|
||||
|
||||
if executionData == nil {
|
||||
return nil, errors.New("nil execution data")
|
||||
}
|
||||
|
||||
hash, err := executionData.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("hash tree root: %w", err)
|
||||
}
|
||||
|
||||
proofData := []byte{
|
||||
0xFF, // Magic byte for dummy proof
|
||||
byte(proofID),
|
||||
// Include some payload hash bytes
|
||||
hash[0],
|
||||
hash[1],
|
||||
hash[2],
|
||||
hash[3],
|
||||
}
|
||||
|
||||
blockRoot := roBlock.Root()
|
||||
|
||||
proof := ðpb.ExecutionProof{
|
||||
ProofId: proofID,
|
||||
Slot: block.Slot(),
|
||||
BlockHash: hash[:],
|
||||
BlockRoot: blockRoot[:],
|
||||
ProofData: proofData,
|
||||
}
|
||||
|
||||
return proof, nil
|
||||
}
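A quick illustration of how this dummy generator might be driven in a test. The fixture helpers (util.NewBeaconBlockBellatrix, blocks.NewSignedBeaconBlock, blocks.NewROBlock) are the ones used elsewhere in this package and may differ in detail; the proofData layout follows the literal bytes above:

func TestGenerateExecProofSketch(t *testing.T) {
    sb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
    require.NoError(t, err)
    roBlock, err := blocks.NewROBlock(sb)
    require.NoError(t, err)

    proof, err := generateExecProof(roBlock, 1 /* proofID */, 10*time.Millisecond)
    require.NoError(t, err)

    // proofData is 6 bytes: a magic byte, the proof ID, then the first four
    // bytes of the execution payload's hash tree root.
    require.Equal(t, 6, len(proof.ProofData))
    require.Equal(t, byte(0xFF), proof.ProofData[0])
    require.Equal(t, byte(1), proof.ProofData[1])
    require.Equal(t, primitives.ExecutionProofId(1), proof.ProofId)
}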
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
@@ -268,6 +269,71 @@ func TestKzgBatchVerifierFallback(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SecondsPerSlot = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier blocked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
|
||||
cancelledCtx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-service.kzgChan:
|
||||
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
|
||||
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
|
||||
if len(roSidecars) >= count {
|
||||
|
||||
@@ -204,6 +204,13 @@ var (
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnsRecoveredFromELAttempts = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "data_columns_recovered_from_el_attempts",
|
||||
Help: "Count the number of data columns recovery attempts from the execution layer.",
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnsRecoveredFromELTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "data_columns_recovered_from_el_total",
|
||||
@@ -242,6 +249,13 @@ var (
|
||||
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnSidecarsObtainedViaELCount = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "data_column_obtained_via_el_count",
|
||||
Help: "Count the number of data column sidecars obtained via the execution layer.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -88,6 +89,13 @@ func WithBlsToExecPool(blsToExecPool blstoexec.PoolManager) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func WithExecProofPool(execProofPool execproofs.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.execProofPool = execProofPool
|
||||
return nil
|
||||
}
|
||||
}
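Like the other options in this file, WithExecProofPool is meant to be applied when the sync service is built. A hedged sketch of the wiring; execproofs.NewPool and the internal config struct literal are assumptions based on the option body above, not names confirmed by this diff:

// withProofPoolExample applies the new option the same way the service
// constructor applies its variadic options.
func withProofPoolExample() error {
    s := &Service{cfg: &config{}} // assumes the service keeps its options on an internal cfg struct
    return WithExecProofPool(execproofs.NewPool())(s)
}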
|
||||
|
||||
func WithChainService(chain blockchainService) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.chain = chain
|
||||
|
||||
@@ -3,9 +3,9 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
@@ -21,13 +21,23 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var pendingAttsLimit = 32768
|
||||
const pendingAttsLimit = 32768
|
||||
|
||||
// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
|
||||
type aggregatorIndexFilter int
|
||||
|
||||
const (
|
||||
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
|
||||
ignoreAggregatorIndex aggregatorIndexFilter = iota
|
||||
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
|
||||
includeAggregatorIndex
|
||||
)
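The two filter values only change whether the aggregator index participates in the comparison; a small example mirroring Test_pendingAggregatesAreEqual below:

func exampleAggregatorIndexFilter() {
    a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{
        AggregatorIndex: 1,
        Aggregate: &ethpb.Attestation{
            Data:            &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1},
            AggregationBits: bitfield.Bitlist{0b1111},
        },
    }}
    b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{
        AggregatorIndex: 2,
        Aggregate: &ethpb.Attestation{
            Data:            &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1},
            AggregationBits: bitfield.Bitlist{0b1111},
        },
    }}
    _ = pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex)  // true: only the aggregator index differs
    _ = pendingAggregatesAreEqual(a, b, includeAggregatorIndex) // false: the aggregator index must also match
}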
|
||||
|
||||
// This method processes pending attestations as a "known" block as arrived. With validations,
|
||||
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
|
||||
@@ -50,16 +60,7 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
attestations := s.blkRootToPendingAtts[bRoot]
|
||||
s.pendingAttsLock.RUnlock()
|
||||
|
||||
if len(attestations) > 0 {
|
||||
start := time.Now()
|
||||
s.processAttestations(ctx, attestations)
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
"duration": duration,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
s.processAttestations(ctx, attestations)
|
||||
|
||||
randGen := rand.NewGenerator()
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
@@ -79,26 +80,71 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
// processAttestations processes a list of attestations.
|
||||
// It assumes (for logging purposes only) that all attestations pertain to the same block.
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
|
||||
if len(attestations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
firstAttestation := attestations[0]
|
||||
var blockRoot []byte
|
||||
switch v := firstAttestation.(type) {
|
||||
case ethpb.Att:
|
||||
blockRoot = v.GetData().BeaconBlockRoot
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping processing", v)
|
||||
return
|
||||
}
|
||||
|
||||
validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
|
||||
startAggregate := time.Now()
|
||||
atts := make([]ethpb.Att, 0, len(attestations))
|
||||
aggregateAttAndProofCount := 0
|
||||
for _, att := range attestations {
|
||||
switch v := att.(type) {
|
||||
case ethpb.Att:
|
||||
atts = append(atts, v)
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
s.processAggregate(ctx, v)
|
||||
aggregateAttAndProofCount++
|
||||
// Avoid processing multiple aggregates only differing by aggregator index.
|
||||
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
|
||||
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
|
||||
}) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.processAggregate(ctx, v); err != nil {
|
||||
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
|
||||
continue
|
||||
}
|
||||
|
||||
validAggregates = append(validAggregates, v)
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping", v)
|
||||
}
|
||||
}
|
||||
durationAggregateAttAndProof := time.Since(startAggregate)
|
||||
|
||||
startAtts := time.Now()
|
||||
for _, bucket := range bucketAttestationsByData(atts) {
|
||||
s.processAttestationBucket(ctx, bucket)
|
||||
}
|
||||
|
||||
durationAtts := time.Since(startAtts)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
"totalCount": len(attestations),
|
||||
"aggregateAttAndProofCount": aggregateAttAndProofCount,
|
||||
"uniqueAggregateAttAndProofCount": len(validAggregates),
|
||||
"attCount": len(atts),
|
||||
"durationTotal": durationAggregateAttAndProof + durationAtts,
|
||||
"durationAggregateAttAndProof": durationAggregateAttAndProof,
|
||||
"durationAtts": durationAtts,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
// attestationBucket groups attestations with the same AttestationData for batch processing.
|
||||
@@ -303,21 +349,20 @@ func (s *Service) processVerifiedAttestation(
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
|
||||
res, err := s.validateAggregatedAtt(ctx, aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
return errors.Wrap(err, "validate aggregated att")
|
||||
}
|
||||
|
||||
if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
|
||||
log.Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
return errors.New("Pending aggregated attestation failed validation")
|
||||
}
|
||||
|
||||
att := aggregate.AggregateAttestationAndProof().AggregateVal()
|
||||
if err := s.saveAttestation(att); err != nil {
|
||||
log.WithError(err).Debug("Could not save aggregated attestation")
|
||||
return
|
||||
return errors.Wrap(err, "save attestation")
|
||||
}
|
||||
|
||||
_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
@@ -325,6 +370,8 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
|
||||
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast aggregated attestation")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This defines how pending aggregates are saved in the map. The key is the
|
||||
@@ -336,7 +383,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
|
||||
|
||||
s.savePending(root, agg, func(other any) bool {
|
||||
a, ok := other.(ethpb.SignedAggregateAttAndProof)
|
||||
return ok && pendingAggregatesAreEqual(agg, a)
|
||||
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -391,13 +438,19 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
|
||||
}
|
||||
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
|
||||
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
|
||||
// The filter parameter controls whether aggregator index is considered in the equality check.
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
|
||||
if a.Version() != b.Version() {
|
||||
return false
|
||||
}
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
|
||||
if filter == includeAggregatorIndex {
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
aAtt := a.AggregateAttestationAndProof().AggregateVal()
|
||||
bAtt := b.AggregateAttestationAndProof().AggregateVal()
|
||||
if aAtt.GetData().Slot != bAtt.GetData().Slot {
|
||||
|
||||
@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
// Process block A (which exists and has no pending attestations)
|
||||
// This should skip processing attestations for A and request blocks B and C
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
require.LogsContain(t, hook, "Requesting blocks by root")
|
||||
}
|
||||
|
||||
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
|
||||
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different version", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProofElectra{Message: ðpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different slot", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different committee index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregation bits", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -984,7 +984,30 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1000},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 1,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -44,11 +43,13 @@ func (s *Service) processPendingBlocksQueue() {
|
||||
if !s.chainIsStarted() {
|
||||
return
|
||||
}
|
||||
|
||||
locker.Lock()
|
||||
defer locker.Unlock()
|
||||
|
||||
if err := s.processPendingBlocks(s.ctx); err != nil {
|
||||
log.WithError(err).Debug("Could not process pending blocks")
|
||||
}
|
||||
locker.Unlock()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -73,8 +74,10 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
randGen := rand.NewGenerator()
|
||||
var parentRoots [][32]byte
|
||||
|
||||
blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
|
||||
|
||||
// Iterate through sorted slots.
|
||||
for _, slot := range sortedSlots {
|
||||
for i, slot := range sortedSlots {
|
||||
// Skip processing if slot is in the future.
|
||||
if slot > s.cfg.clock.CurrentSlot() {
|
||||
continue
|
||||
@@ -91,6 +94,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
|
||||
// Process each block in the queue.
|
||||
for _, b := range blocksInCache {
|
||||
start := time.Now()
|
||||
totalDuration := time.Duration(0)
|
||||
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -147,19 +153,34 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
}
|
||||
cancelFunction()
|
||||
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
blkRoots = append(blkRoots, blkRoot)
|
||||
|
||||
// Remove the processed block from the queue.
|
||||
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
|
||||
|
||||
duration := time.Since(start)
|
||||
totalDuration += duration
|
||||
log.WithFields(logrus.Fields{
|
||||
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", blkRoot),
|
||||
"duration": duration,
|
||||
"totalDuration": totalDuration,
|
||||
}).Debug("Processed pending block and cleared it in cache")
|
||||
}
|
||||
|
||||
span.End()
|
||||
}
|
||||
|
||||
for _, blkRoot := range blkRoots {
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
}
|
||||
|
||||
return s.sendBatchRootRequest(ctx, parentRoots, randGen)
|
||||
}
|
||||
|
||||
@@ -238,6 +259,10 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
|
||||
return errors.Wrap(err, "request and save missing data column sidecars")
|
||||
}
|
||||
|
||||
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
|
||||
return errors.Wrap(err, "request and save missing execution proofs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -379,6 +404,19 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
||||
req = roots[:maxReqBlock]
|
||||
}
|
||||
|
||||
if logrus.GetLevel() >= logrus.DebugLevel {
|
||||
rootsStr := make([]string, 0, len(roots))
|
||||
for _, root := range roots {
rootsStr = append(rootsStr, fmt.Sprintf("%#x", root))
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"count": len(req),
|
||||
"roots": rootsStr,
|
||||
}).Debug("Requesting blocks by root")
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -438,8 +476,6 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
|
||||
roots = append(roots[:i], roots[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
||||
}
|
||||
return roots
|
||||
}
|
||||
|
||||
@@ -100,6 +100,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
||||
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
|
||||
// DataColumnSidecarsByRangeV1
|
||||
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
|
||||
|
||||
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
||||
// ExecutionProofsByRootV1
|
||||
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
|
||||
|
||||
// General topic for all rpc requests.
|
||||
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestNewRateLimiter(t *testing.T) {
|
||||
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
|
||||
}
|
||||
|
||||
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
|
||||
|
||||
@@ -51,6 +51,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
|
||||
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
|
||||
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
|
||||
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
|
||||
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -11,11 +11,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
@@ -87,9 +89,77 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
|
||||
return errors.Wrap(err, "request and save missing data columns")
|
||||
}
|
||||
|
||||
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
|
||||
return errors.Wrap(err, "request and save missing execution proofs")
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
|
||||
if len(blks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Parallelize requests for multiple blocks.
|
||||
for _, blk := range blks {
|
||||
if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
|
||||
if !features.Get().EnableZkvm {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check proof retention period.
|
||||
blockEpoch := slots.ToEpoch(block.Block().Slot())
|
||||
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
|
||||
if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check how many proofs are needed with Execution Proof Pool.
|
||||
storedIds := s.cfg.execProofPool.Ids(block.Root())
|
||||
count := uint64(len(storedIds))
|
||||
if count >= params.BeaconConfig().MinProofsRequired {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct request
|
||||
blockRoot := block.Root()
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: params.BeaconConfig().MinProofsRequired - count,
|
||||
AlreadyHave: storedIds,
|
||||
}
|
||||
|
||||
// Call SendExecutionProofByRootRequest
|
||||
zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
|
||||
if len(zkvmEnabledPeers) == 0 {
|
||||
return fmt.Errorf("no zkVM enabled peers available to request execution proofs")
|
||||
}
|
||||
|
||||
// TODO: For simplicity, just pick the first peer for now.
|
||||
// In the future, we can implement better peer selection logic.
|
||||
pid := zkvmEnabledPeers[0]
|
||||
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("send execution proofs by root request: %w", err)
|
||||
}
|
||||
|
||||
// Insert ExecProofPool
|
||||
// TODO: Implement multiple proof insertion in ExecProofPool to avoid multiple locks.
|
||||
for _, proof := range proofs {
|
||||
s.cfg.execProofPool.Insert(proof)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
|
||||
// If so, requests them and saves them to the storage.
|
||||
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
|
||||
|
||||
@@ -182,3 +182,21 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
|
||||
// Success response code.
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
return errors.Wrap(err, "stream write")
|
||||
}
|
||||
ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
|
||||
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
|
||||
return errors.Wrap(err, "write context to stream")
|
||||
}
|
||||
|
||||
// Execution proof.
|
||||
if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
|
||||
return errors.Wrap(err, "encode with max length")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
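On the serving side this helper is called once per proof. A compact sketch of the response loop; the RPC handler at the end of this diff follows the same shape, so the wrapper function here is illustrative rather than code from the change:

// writeProofChunks streams up to countNeeded proofs back to the requester,
// one success-framed chunk (status code, fork-digest context, SSZ payload) each.
func writeProofChunks(stream libp2pcore.Stream, enc encoder.NetworkEncoding, proofs []*ethpb.ExecutionProof, countNeeded uint64) error {
    sent := uint64(0)
    for _, proof := range proofs {
        if sent >= countNeeded {
            break
        }
        if err := WriteExecutionProofChunk(stream, enc, proof); err != nil {
            return errors.Wrap(err, "write execution proof chunk")
        }
        sent++
    }
    return nil
}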
|
||||
|
||||
beacon-chain/sync/rpc_execution_proofs_by_root_topic.go (new file, 219 lines)
@@ -0,0 +1,219 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
|
||||
func SendExecutionProofsByRootRequest(
|
||||
ctx context.Context,
|
||||
clock blockchain.TemporalOracle,
|
||||
p2pProvider p2p.P2P,
|
||||
pid peer.ID,
|
||||
req *ethpb.ExecutionProofsByRootRequest,
|
||||
) ([]*ethpb.ExecutionProof, error) {
|
||||
// Validate request
|
||||
if req.CountNeeded == 0 {
|
||||
return nil, errors.New("count_needed must be greater than 0")
|
||||
}
|
||||
|
||||
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"block_root": bytesutil.ToBytes32(req.BlockRoot),
|
||||
"count": req.CountNeeded,
|
||||
"already": len(req.AlreadyHave),
|
||||
}).Debug("Sending execution proofs by root request")
|
||||
|
||||
stream, err := p2pProvider.Send(ctx, req, topic, pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer closeStream(stream, log)
|
||||
|
||||
// Read execution proofs from stream
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
|
||||
alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
|
||||
for _, id := range req.AlreadyHave {
|
||||
alreadyHaveSet[id] = struct{}{}
|
||||
}
|
||||
|
||||
for i := uint64(0); i < req.CountNeeded; i++ {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Validate proof
|
||||
if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
return proofs, nil
|
||||
}

// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
    stream libp2pcore.Stream,
    encoding p2p.EncodingProvider,
    isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
    // Read the status code for each chunk (like data columns, not like blocks).
    code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
    if err != nil {
        return nil, err
    }
    if code != 0 {
        return nil, errors.New(errMsg)
    }

    // Read the context bytes (fork digest).
    _, err = readContextFromStream(stream)
    if err != nil {
        return nil, fmt.Errorf("read context from stream: %w", err)
    }

    // Decode the proof.
    proof := &ethpb.ExecutionProof{}
    if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
        return nil, err
    }

    return proof, nil
}

// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
    proof *ethpb.ExecutionProof,
    req *ethpb.ExecutionProofsByRootRequest,
    alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
    // Check that the block root matches.
    proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
    reqRoot := bytesutil.ToBytes32(req.BlockRoot)
    if proofRoot != reqRoot {
        return fmt.Errorf("proof block root %#x does not match requested root %#x",
            proofRoot, reqRoot)
    }

    // Check we didn't already have this proof.
    if _, ok := alreadyHaveSet[proof.ProofId]; ok {
        return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
    }

    // Check the proof ID is valid (within the max range).
    if !proof.ProofId.IsValid() {
        return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
    }

    return nil
}

// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
    ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
    defer span.End()

    _, cancel := context.WithTimeout(ctx, ttfbTimeout)
    defer cancel()

    req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
    if !ok {
        return errors.New("message is not type ExecutionProofsByRootRequest")
    }

    remotePeer := stream.Conn().RemotePeer()
    SetRPCStreamDeadlines(stream)

    // Validate the request.
    if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
        return err
    }

    // Penalize peers that send invalid requests.
    if err := validateExecutionProofsByRootRequest(req); err != nil {
        s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
        s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
        return fmt.Errorf("validate execution proofs by root request: %w", err)
    }

    blockRoot := bytesutil.ToBytes32(req.BlockRoot)

    log := log.WithFields(logrus.Fields{
        "blockroot":   fmt.Sprintf("%#x", blockRoot),
        "neededCount": req.CountNeeded,
        "alreadyHave": req.AlreadyHave,
        "peer":        remotePeer.String(),
    })

    s.rateLimiter.add(stream, 1)
    defer closeStream(stream, log)

    // Get proofs from the execution proof pool.
    storedProofs := s.cfg.execProofPool.Get(blockRoot)

    // Build a set of proofs the requester already has, so they can be skipped.
    alreadyHave := make(map[primitives.ExecutionProofId]bool)
    for _, id := range req.AlreadyHave {
        alreadyHave[id] = true
    }

    // Send proofs.
    sentCount := uint64(0)
    for _, proof := range storedProofs {
        if sentCount >= req.CountNeeded {
            break
        }

        // Skip proofs the requester already has.
        if alreadyHave[proof.ProofId] {
            continue
        }

        // Write the proof to the stream.
        SetStreamWriteDeadline(stream, defaultWriteDuration)
        if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
            log.WithError(err).Debug("Could not send execution proof")
            s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
            return err
        }

        sentCount++
    }

    log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")

    return nil
}

func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
    if req.CountNeeded == 0 {
        return errors.New("count_needed must be greater than 0")
    }
    return nil
}

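A sketch of the intended caller-side flow for the helper above: build a request from the proofs already held locally and let SendExecutionProofsByRootRequest validate whatever the peer returns. The fetchMissingProofs name and its argument list are illustrative only, not part of this diff.

    import (
        "context"

        "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
        "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
        ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
        "github.com/libp2p/go-libp2p/core/peer"
    )

    // fetchMissingProofs is a hypothetical helper showing the intended call
    // pattern for SendExecutionProofsByRootRequest.
    func fetchMissingProofs(
        ctx context.Context,
        clock blockchain.TemporalOracle,
        p2pProvider p2p.P2P,
        pid peer.ID,
        blockRoot [32]byte,
        have []primitives.ExecutionProofId,
        want uint64,
    ) ([]*ethpb.ExecutionProof, error) {
        req := &ethpb.ExecutionProofsByRootRequest{
            BlockRoot:   blockRoot[:],
            CountNeeded: want,
            AlreadyHave: have, // the server skips these IDs
        }
        // The helper validates the request, opens the stream, and checks each
        // returned proof against the requested root and the AlreadyHave set.
        return SendExecutionProofsByRootRequest(ctx, clock, p2pProvider, pid, req)
    }
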
727  beacon-chain/sync/rpc_execution_proofs_by_root_topic_test.go  Normal file
@@ -0,0 +1,727 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
chainMock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestExecutionProofsByRootRPCHandler(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
t.Run("wrong message type", func(t *testing.T) {
|
||||
service := &Service{}
|
||||
err := service.executionProofsByRootRPCHandler(t.Context(), nil, nil)
|
||||
require.ErrorContains(t, "message is not type ExecutionProofsByRootRequest", err)
|
||||
})
|
||||
|
||||
t.Run("invalid request - count_needed is 0", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
service := &Service{cfg: &config{p2p: localP2P}, rateLimiter: newRateLimiter(localP2P)}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, errMsg, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, responseCodeInvalidRequest, code)
|
||||
require.Equal(t, "count_needed must be greater than 0", errMsg)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 0, // Invalid: must be > 0
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zkVM disabled - returns empty", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: false, // Disabled
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
execProofPool := execproofs.NewPool()
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
execProofPool: execProofPool,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
// Should receive no proofs (stream should end)
|
||||
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no proofs available", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
execProofPool := execproofs.NewPool()
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
execProofPool: execProofPool,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
// Should receive no proofs (stream should end)
|
||||
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nominal - returns requested proofs", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with some proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
// Add 3 proofs for the same block
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
proof3 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(3),
|
||||
ProofData: []byte("proof3"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
execProofPool.Insert(proof3)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 2)
|
||||
|
||||
for i := range 2 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
// Should receive proof1 and proof2 (first 2 in pool)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("filters already_have proofs", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with some proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
// Add 4 proofs for the same block
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
proof3 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(3),
|
||||
ProofData: []byte("proof3"),
|
||||
}
|
||||
proof4 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(4),
|
||||
ProofData: []byte("proof4"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
execProofPool.Insert(proof3)
|
||||
execProofPool.Insert(proof4)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 2)
|
||||
|
||||
for i := range 3 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
// Should skip proof1 and proof2 (already_have), and return proof3 and proof4
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(3), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(4), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{1, 2}, // Already have proof1 and proof2
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("partial send - less proofs than requested", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with only 2 proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 5)
|
||||
|
||||
for i := range 5 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
// Should only receive 2 proofs (not 5 as requested)
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 5, // Request 5 but only 2 available
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateExecutionProofsByRootRequest(t *testing.T) {
|
||||
t.Run("invalid - count_needed is 0", func(t *testing.T) {
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
CountNeeded: 0,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
err := validateExecutionProofsByRootRequest(req)
|
||||
require.ErrorContains(t, "count_needed must be greater than 0", err)
|
||||
})
|
||||
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
err := validateExecutionProofsByRootRequest(req)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSendExecutionProofsByRootRequest(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
t.Run("count_needed is 0 - returns error", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 0,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "count_needed must be greater than 0", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("success - receives requested proofs", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
// Create proofs to send back
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
|
||||
// Setup remote to send proofs
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
|
||||
// Read the request (we don't validate it in this test)
|
||||
_ = ðpb.ExecutionProofsByRootRequest{}
|
||||
|
||||
// Send proof1
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
|
||||
// Send proof2
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof2))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("partial response - EOF before count_needed", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
// Setup remote to send only 1 proof (but we request 5)
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
// Send only proof1
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 5, // Request 5 but only get 1
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(proofs)) // Only received 1
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
})
|
||||
|
||||
t.Run("invalid block root - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
requestedRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
wrongRoot := [32]byte{0xFF, 0xFF, 0xFF}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
// Create proof with wrong block root
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: wrongRoot[:], // Wrong root!
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: requestedRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "does not match requested root", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("already_have proof - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{1}, // Already have proof_id 1
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "received proof we already have", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("invalid proof_id - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(255), // Invalid proof_id (max valid is 7)
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "invalid proof_id", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
}
|
||||
@@ -23,6 +23,7 @@ import (
    lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -67,6 +68,7 @@ const (
    seenProposerSlashingSize = 100
    badBlockSize             = 1000
    syncMetricsInterval      = 10 * time.Second
    seenExecutionProofSize   = 100
)

var (
@@ -94,6 +96,7 @@ type config struct {
    slashingPool  slashings.PoolManager
    syncCommsPool synccommittee.Pool
    blsToExecPool blstoexec.PoolManager
    execProofPool execproofs.PoolManager
    chain         blockchainService
    initialSync   Checker
    blockNotifier blockfeed.Notifier
@@ -235,7 +238,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
    r.subHandler = newSubTopicHandler()
    r.rateLimiter = newRateLimiter(r.cfg.p2p)
    r.initCaches()

    return r
}

@@ -329,6 +329,17 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
                getSubnetsRequiringPeers: s.allDataColumnSubnets,
            })
        })

        if features.Get().EnableZkvm {
            s.spawn(func() {
                s.subscribe(
                    p2p.ExecutionProofSubnetTopicFormat,
                    s.validateExecutionProof,
                    s.executionProofSubscriber,
                    nse,
                )
            })
        }
    }
    return true
}

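The new execProofPool field is an execproofs.PoolManager, which is not shown in this excerpt. From the call sites in this diff (Insert on gossip and after local generation, Get in the by-root handler, Exists during gossip validation) its shape is plausibly something like the sketch below; the real interface lives in beacon-chain/operations/execproofs and may carry more methods.

    import (
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
        ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    )

    // PoolManager is an inferred sketch of the execution proof pool, based only
    // on how it is used in this diff.
    type PoolManager interface {
        // Insert adds a gossiped or locally generated proof to the pool.
        Insert(proof *ethpb.ExecutionProof)
        // Get returns all pooled proofs for the given beacon block root.
        Get(blockRoot [32]byte) []*ethpb.ExecutionProof
        // Exists reports whether a proof with this ID is already pooled for the root.
        Exists(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool
    }
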
@@ -11,6 +11,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
    "github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v7/config/features"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
@@ -22,6 +23,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sync/errgroup"
    "google.golang.org/protobuf/proto"
)

@@ -69,12 +71,49 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
        }
        return err
    }

    // We use the service context to ensure this context is not cancelled
    // when the current function returns.
    // TODO: Do not broadcast proofs for blocks we have already seen.
    go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)

    if err := s.processPendingAttsForBlock(ctx, root); err != nil {
        return errors.Wrap(err, "process pending atts for block")
    }

    return nil
}

func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
    const delay = 2 * time.Second
    proofTypes := flags.Get().ProofGenerationTypes

    if len(proofTypes) == 0 {
        return
    }

    var wg errgroup.Group
    for _, proofType := range proofTypes {
        wg.Go(func() error {
            execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
            if err != nil {
                return fmt.Errorf("generate exec proof: %w", err)
            }

            if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
                return fmt.Errorf("broadcast exec proof: %w", err)
            }

            return nil
        })
    }

    if err := wg.Wait(); err != nil {
        log.WithError(err).Error("Failed to generate and broadcast execution proofs")
    }
}

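The TODO above (do not broadcast proofs for blocks we have already seen) could be served by a small seen-root guard checked before spawning generateAndBroadcastExecutionProofs. A minimal, standard-library-only sketch, illustrative rather than part of this diff; a bounded LRU cache would be preferable in practice.

    import "sync"

    // seenProofBlocks tracks block roots for which proofs were already broadcast.
    type seenProofBlocks struct {
        mu   sync.Mutex
        seen map[[32]byte]struct{}
    }

    func newSeenProofBlocks() *seenProofBlocks {
        return &seenProofBlocks{seen: make(map[[32]byte]struct{})}
    }

    // markIfNew returns true the first time a root is observed and false
    // afterwards, so the caller can skip proof generation for repeat blocks.
    func (s *seenProofBlocks) markIfNew(root [32]byte) bool {
        s.mu.Lock()
        defer s.mu.Unlock()
        if _, ok := s.seen[root]; ok {
            return false
        }
        s.seen[root] = struct{}{}
        return true
    }
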
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
|
||||
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
|
||||
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {
|
||||
@@ -189,12 +228,30 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
|
||||
defer cancel()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"type": source.Type(),
|
||||
})
|
||||
|
||||
var constructedSidecarCount uint64
|
||||
for iteration := uint64(0); ; /*no stop condition*/ iteration++ {
|
||||
log = log.WithField("iteration", iteration)
|
||||
|
||||
// Exit early if all sidecars to sample have been seen.
|
||||
if s.haveAllSidecarsBeenSeen(source.Slot(), source.ProposerIndex(), columnIndicesToSample) {
|
||||
if iteration > 0 && constructedSidecarCount == 0 {
|
||||
log.Debug("No data column sidecars constructed from the execution client")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if iteration == 0 {
|
||||
dataColumnsRecoveredFromELAttempts.Inc()
|
||||
}
|
||||
|
||||
// Try to reconstruct data column constructedSidecars from the execution client.
|
||||
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
|
||||
if err != nil {
|
||||
@@ -202,8 +259,8 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
}
|
||||
|
||||
// No sidecars are retrieved from the EL, retry later
|
||||
sidecarCount := uint64(len(constructedSidecars))
|
||||
if sidecarCount == 0 {
|
||||
constructedSidecarCount = uint64(len(constructedSidecars))
|
||||
if constructedSidecarCount == 0 {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
@@ -212,9 +269,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
continue
|
||||
}
|
||||
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
|
||||
// Boundary check.
|
||||
if sidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, fieldparams.NumberOfColumns)
|
||||
if constructedSidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
|
||||
}
|
||||
|
||||
unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, source.Slot(), source.ProposerIndex(), columnIndicesToSample, constructedSidecars)
|
||||
@@ -222,19 +281,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
|
||||
}
|
||||
|
||||
if len(unseenIndices) > 0 {
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"iteration": iteration,
|
||||
"type": source.Type(),
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
}
|
||||
dataColumnSidecarsObtainedViaELCount.Observe(float64(len(unseenIndices)))
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
31  beacon-chain/sync/subscriber_execution_proofs.go  Normal file
@@ -0,0 +1,31 @@
package sync

import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
    opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
    "google.golang.org/protobuf/proto"
)

func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
    executionProof, ok := msg.(*ethpb.ExecutionProof)
    if !ok {
        return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.ExecutionProof{}, msg)
    }

    // Insert the execution proof into the pool.
    s.cfg.execProofPool.Insert(executionProof)

    // Notify subscribers about the new execution proof.
    s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
        Type: opfeed.ExecutionProofReceived,
        Data: &opfeed.ExecutionProofReceivedData{
            ExecutionProof: executionProof,
        },
    })

    return nil
}
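A sketch of how another component might consume the ExecutionProofReceived events published above, assuming the operation notifier exposes the usual OperationFeed().Subscribe pattern used elsewhere in the codebase; the function name and channel size are illustrative only, not part of this diff.

    import (
        "context"
        "fmt"

        "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
        opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
    )

    // watchExecutionProofs logs every execution proof announced on the
    // operation feed until the context is cancelled.
    func watchExecutionProofs(ctx context.Context, notifier opfeed.Notifier) {
        ch := make(chan *feed.Event, 16)
        sub := notifier.OperationFeed().Subscribe(ch)
        defer sub.Unsubscribe()

        for {
            select {
            case ev := <-ch:
                if ev.Type != opfeed.ExecutionProofReceived {
                    continue
                }
                if data, ok := ev.Data.(*opfeed.ExecutionProofReceivedData); ok {
                    fmt.Printf("observed execution proof id=%d\n", data.ExecutionProof.ProofId)
                }
            case <-ctx.Done():
                return
            }
        }
    }
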
@@ -3,11 +3,12 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -51,14 +52,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
// Decode the message, reject if it fails.
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to decode message")
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Reject messages that are not of the expected type.
|
||||
dcsc, ok := m.(*eth.DataColumnSidecar)
|
||||
if !ok {
|
||||
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
@@ -194,19 +193,13 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
|
||||
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
|
||||
|
||||
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)
|
||||
|
||||
select {
|
||||
case s.dataColumnLogCh <- dataColumnLogEntry{
|
||||
Slot: roDataColumn.Slot(),
|
||||
ColIdx: roDataColumn.Index,
|
||||
PropIdx: roDataColumn.ProposerIndex(),
|
||||
BlockRoot: roDataColumn.BlockRoot(),
|
||||
ParentRoot: roDataColumn.ParentRoot(),
|
||||
PeerSuffix: pid.String()[len(pid.String())-6:],
|
||||
PeerGossipScore: peerGossipScore,
|
||||
validationTime: validationTime,
|
||||
sinceStartTime: sinceSlotStartTime,
|
||||
slot: roDataColumn.Slot(),
|
||||
index: roDataColumn.Index,
|
||||
root: roDataColumn.BlockRoot(),
|
||||
validationTime: validationTime,
|
||||
sinceStartTime: sinceSlotStartTime,
|
||||
}:
|
||||
default:
|
||||
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
|
||||
@@ -251,68 +244,69 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
|
||||
}
|
||||
|
||||
type dataColumnLogEntry struct {
|
||||
Slot primitives.Slot
|
||||
ColIdx uint64
|
||||
PropIdx primitives.ValidatorIndex
|
||||
BlockRoot [32]byte
|
||||
ParentRoot [32]byte
|
||||
PeerSuffix string
|
||||
PeerGossipScore float64
|
||||
validationTime time.Duration
|
||||
sinceStartTime time.Duration
|
||||
slot primitives.Slot
|
||||
index uint64
|
||||
root [32]byte
|
||||
validationTime time.Duration
|
||||
sinceStartTime time.Duration
|
||||
}
|
||||
|
||||
func (s *Service) processDataColumnLogs() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
|
||||
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
|
||||
|
||||
for {
|
||||
select {
|
||||
case entry := <-s.dataColumnLogCh:
|
||||
cols := slotStats[entry.Slot]
|
||||
cols[entry.ColIdx] = entry
|
||||
slotStats[entry.Slot] = cols
|
||||
case col := <-s.dataColumnLogCh:
|
||||
cols := slotStats[col.root]
|
||||
cols = append(cols, col)
|
||||
slotStats[col.root] = cols
|
||||
case <-ticker.C:
|
||||
for slot, columns := range slotStats {
|
||||
var (
|
||||
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
|
||||
peers = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
|
||||
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
)
|
||||
for root, columns := range slotStats {
|
||||
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
|
||||
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
|
||||
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)
|
||||
|
||||
totalReceived := 0
|
||||
for _, entry := range columns {
|
||||
if entry.PeerSuffix == "" {
|
||||
for _, column := range columns {
|
||||
indices = append(indices, column.index)
|
||||
|
||||
sumValidationTime += column.validationTime
|
||||
sumSinceStartTime += column.sinceStartTime
|
||||
|
||||
if totalReceived == 0 {
|
||||
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
|
||||
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
|
||||
totalReceived++
|
||||
continue
|
||||
}
|
||||
colIndices = append(colIndices, entry.ColIdx)
|
||||
peers = append(peers, entry.PeerSuffix)
|
||||
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
|
||||
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
|
||||
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))
|
||||
|
||||
minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
|
||||
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
|
||||
totalReceived++
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"receivedCount": totalReceived,
|
||||
"columnIndices": colIndices,
|
||||
"peers": peers,
|
||||
"gossipScores": gossipScores,
|
||||
"validationTimes": validationTimes,
|
||||
"sinceStartTimes": sinceStartTimes,
|
||||
}).Debug("Accepted data column sidecars summary")
|
||||
if totalReceived > 0 {
|
||||
slices.Sort(indices)
|
||||
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
|
||||
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": columns[0].slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"count": totalReceived,
|
||||
"indices": helpers.PrettySlice(indices),
|
||||
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
|
||||
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
|
||||
}).Debug("Accepted data column sidecars summary")
|
||||
}
|
||||
}
|
||||
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
|
||||
|
||||
slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func roundFloat(f float64, decimals int) float64 {
|
||||
mult := math.Pow(10, float64(decimals))
|
||||
return math.Round(f*mult) / mult
|
||||
func prettyMinMaxAverage(min, max, average time.Duration) string {
|
||||
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
|
||||
}
|
||||
|
||||
132  beacon-chain/sync/validate_execution_proof.go  Normal file
@@ -0,0 +1,132 @@
package sync

import (
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/peer"
)

func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
    // Always accept our own messages.
    if pid == s.cfg.p2p.PeerID() {
        return pubsub.ValidationAccept, nil
    }

    // Ignore messages during initial sync.
    if s.cfg.initialSync.Syncing() {
        return pubsub.ValidationIgnore, nil
    }

    // Reject messages with a nil topic.
    if msg.Topic == nil {
        return pubsub.ValidationReject, p2p.ErrInvalidTopic
    }

    // Decode the message, reject if it fails.
    m, err := s.decodePubsubMessage(msg)
    if err != nil {
        log.WithError(err).Error("Failed to decode message")
        return pubsub.ValidationReject, err
    }

    // Reject messages that are not of the expected type.
    executionProof, ok := m.(*ethpb.ExecutionProof)
    if !ok {
        log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
        return pubsub.ValidationReject, errWrongMessage
    }

    // 1. Verify the proof is not from a future slot.
    if err := s.proofNotFromFutureSlot(executionProof); err != nil {
        return pubsub.ValidationReject, err
    }

    // 2. Verify the proof slot is greater than the finalized slot.
    if err := s.proofAboveFinalizedSlot(ctx, executionProof); err != nil {
        return pubsub.ValidationReject, err
    }

    // 3. Check if the proof is already in the DA checker cache (execution proof pool).
    // If it exists in the cache, we know it has already passed validation.
    blockRoot := bytesutil.ToBytes32(executionProof.BlockRoot)
    if s.isProofCachedInPool(blockRoot, executionProof.ProofId) {
        return pubsub.ValidationIgnore, nil
    }

    // 4. Verify proof size limits.
    if uint64(len(executionProof.ProofData)) > params.BeaconConfig().MaxProofDataBytes {
        return pubsub.ValidationReject, fmt.Errorf("execution proof data size %d exceeds maximum allowed %d", len(executionProof.ProofData), params.BeaconConfig().MaxProofDataBytes)
    }

    // 5. Run zkVM proof verification.
    if err := s.verifyExecutionProof(executionProof); err != nil {
        return pubsub.ValidationReject, err
    }

    // Validation successful, return accept.
    return pubsub.ValidationAccept, nil
}

// TODO: Do we need encapsulation for all those verification functions?

// proofNotFromFutureSlot checks whether the execution proof is from a future slot.
func (s *Service) proofNotFromFutureSlot(executionProof *ethpb.ExecutionProof) error {
    currentSlot := s.cfg.clock.CurrentSlot()
    proofSlot := executionProof.Slot

    if currentSlot == proofSlot {
        return nil
    }

    earliestStart, err := s.cfg.clock.SlotStart(proofSlot)
    if err != nil {
        // TODO: Should we penalize the peer for this?
        return fmt.Errorf("failed to compute start time for proof slot %d: %w", proofSlot, err)
    }

    earliestStart = earliestStart.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
    // If the system time is still before earliestStart, we consider the proof to be from a future slot and return an error.
    if s.cfg.clock.Now().Before(earliestStart) {
        return fmt.Errorf("slot %d is too far in the future (current slot: %d)", proofSlot, currentSlot)
    }
    return nil
}

// proofAboveFinalizedSlot checks whether the execution proof's slot is after the finalized slot.
func (s *Service) proofAboveFinalizedSlot(ctx context.Context, executionProof *ethpb.ExecutionProof) error {
    finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
    if err != nil {
        // TODO: Should we penalize the peer for this?
        return fmt.Errorf("failed to get finalized checkpoint: %w", err)
    }

    fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
    if err != nil {
        // TODO: Should we penalize the peer for this?
        return fmt.Errorf("failed to compute start slot for finalized epoch %d: %w", finalizedCheckpoint.Epoch, err)
    }

    if executionProof.Slot <= fSlot {
        return fmt.Errorf("execution proof slot %d is not after finalized slot %d", executionProof.Slot, fSlot)
    }
    return nil
}

// isProofCachedInPool checks if the execution proof is already present in the pool.
func (s *Service) isProofCachedInPool(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
    return s.cfg.execProofPool.Exists(blockRoot, proofId)
}

// verifyExecutionProof performs the actual verification of the execution proof.
func (s *Service) verifyExecutionProof(_ *ethpb.ExecutionProof) error {
    // For now, consider all proofs valid.
    return nil
}
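The future-slot rule in proofNotFromFutureSlot boils down to a single time comparison: a proof for slot S becomes acceptable once local time reaches the start of S minus MaximumGossipClockDisparity. A standalone restatement for reasoning about edge cases; the names are local to this sketch and not part of the diff.

    import "time"

    // proofTooEarly reports whether a proof for a slot starting at slotStart
    // should still be rejected at time now, given the allowed clock disparity.
    func proofTooEarly(now, slotStart time.Time, clockDisparity time.Duration) bool {
        earliestAccepted := slotStart.Add(-clockDisparity)
        return now.Before(earliestAccepted)
    }

With 12-second slots and the default 500 ms disparity, a proof for the next slot stops being rejected 500 ms before that slot begins.
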
408  beacon-chain/sync/validate_execution_proof_test.go  Normal file
@@ -0,0 +1,408 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
testingdb "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestValidateExecutionProof(t *testing.T) {
|
||||
beaconDB := testingdb.SetupDB(t)
|
||||
p2pService := mockp2p.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
fcp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
}
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Root: params.BeaconConfig().ZeroHash[:],
|
||||
Slot: 0,
|
||||
}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, fcp))
|
||||
|
||||
defaultTopic := p2p.ExecutionProofSubnetTopicFormat + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E}
|
||||
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
FinalizedCheckPoint: fcp,
|
||||
}
|
||||
|
||||
currentSlot := primitives.Slot(100)
|
||||
genesisTime := time.Now().Add(-time.Duration(uint64(currentSlot)*params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setupService func() *Service
|
||||
proof *ethpb.ExecutionProof
|
||||
topic *string
|
||||
pid peer.ID
|
||||
want pubsub.ValidationResult
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Ignore when syncing",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: true},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationIgnore,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Reject nil topic",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: nil,
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Reject proof from future slot",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot + 1000, // Far future slot
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Reject proof below finalized slot",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: primitives.Slot(5), // Before finalized epoch 1
|
||||
ProofId: 1,
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Ignore already seen proof",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationIgnore,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Ignore proof already in pool",
|
||||
setupService: func() *Service {
|
||||
pool := execproofs.NewPool()
|
||||
pool.Insert(ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
})
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: pool,
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationIgnore,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Reject proof if no verifier found",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Reject proof if verification fails",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Accept valid proof",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationAccept,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.setupService()
|
||||
|
||||
// Create pubsub message
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p2pService.Encoding().EncodeGossip(buf, tt.proof)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: tt.topic,
|
||||
},
|
||||
}
|
||||
|
||||
// Validate
|
||||
result, err := s.validateExecutionProof(ctx, tt.pid, msg)
|
||||
|
||||
if tt.wantErr {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.want, result)
|
||||
|
||||
// If validation accepted, check that ValidatorData is set
|
||||
if result == pubsub.ValidationAccept {
|
||||
assert.NotNil(t, msg.ValidatorData)
|
||||
validatedProof, ok := msg.ValidatorData.(*ethpb.ExecutionProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Check that the validated proof matches the original
|
||||
assert.Equal(t, tt.proof.ProofId, validatedProof.ProofId)
|
||||
assert.Equal(t, tt.proof.Slot, validatedProof.Slot)
|
||||
assert.DeepEqual(t, tt.proof.BlockRoot, validatedProof.BlockRoot)
|
||||
assert.DeepEqual(t, tt.proof.BlockHash, validatedProof.BlockHash)
|
||||
assert.DeepEqual(t, tt.proof.ProofData, validatedProof.ProofData)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type alwaysFailVerifier struct{}
|
||||
|
||||
func (v *alwaysFailVerifier) Verify(proof *ethpb.ExecutionProof) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (v *alwaysFailVerifier) GetProofId() primitives.ExecutionProofId {
|
||||
return primitives.ExecutionProofId(1)
|
||||
}
|
||||
@@ -54,11 +54,13 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    cfg.FuluForkEpoch = 6
    cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
    cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
    cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
    cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
    cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
    cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
    params.OverrideBeaconConfig(cfg)

    secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)

@@ -101,7 +103,10 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    }

    for _, test := range tests {
        for v := 1; v < 6; v++ {
        for v := range version.All() {
            if v == version.Phase0 {
                continue
            }
            t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
                ctx := t.Context()
                p := p2ptest.NewTestP2P(t)

@@ -180,11 +185,13 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    cfg.FuluForkEpoch = 6
    cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
    cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
    cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
    cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
    cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
    cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
    params.OverrideBeaconConfig(cfg)

    secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)

@@ -227,7 +234,10 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    }

    for _, test := range tests {
        for v := 1; v < 6; v++ {
        for v := range version.All() {
            if v == version.Phase0 {
                continue
            }
            t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
                ctx := t.Context()
                p := p2ptest.NewTestP2P(t)

@@ -687,6 +687,12 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
    }}
}

func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
    return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
        return st, nil
    }}
}

func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
    return sbrForValOverrideWithT(nil, idx, val)
}

@@ -11,12 +11,10 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/runtime/logging"
    "github.com/OffchainLabs/prysm/v7/time/slots"

@@ -361,7 +359,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
    }

    if !dv.fc.HasNode(parentRoot) {
        return columnErrBuilder(errSidecarParentNotSeen)
        return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
    }
}

@@ -484,88 +482,19 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e

    defer dv.recordResult(RequireSidecarProposerExpected, &err)

    type slotParentRoot struct {
        slot       primitives.Slot
        parentRoot [fieldparams.RootLength]byte
    }

    targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)

    var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
        // Use cached values if available.
        slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
        if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
            return root, nil
        }

        // Compute the epoch of the data column slot.
        dataColumnEpoch := slots.ToEpoch(slot)
        if dataColumnEpoch > 0 {
            dataColumnEpoch = dataColumnEpoch - 1
        }

        // Compute the target root for the epoch.
        targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
        if err != nil {
            return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
        }

        // Store the target root in the cache.
        targetRootBySlotParentRoot[slotParentRoot] = targetRoot

        return targetRoot, nil
    }

    for _, dataColumn := range dv.dataColumns {
        // Extract the slot of the data column.
        dataColumnSlot := dataColumn.Slot()

        // Extract the root of the parent block corresponding to the data column.
        parentRoot := dataColumn.ParentRoot()

        // Compute the target root for the data column.
        targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
        // Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
        verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
        if err != nil {
            return columnErrBuilder(errors.Wrap(err, "target root"))
            return columnErrBuilder(errors.Wrap(err, "verifying state"))
        }

        // Compute the epoch of the data column slot.
        dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
        if dataColumnEpoch > 0 {
            dataColumnEpoch = dataColumnEpoch - 1
        }

        // Create a checkpoint for the target root.
        checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}

        // Try to extract the proposer index from the data column in the cache.
        idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)

        if !cached {
            parentRoot := dataColumn.ParentRoot()
            // Ensure the expensive index computation is only performed once for
            // concurrent requests for the same signature data.
            idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
                verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
                if err != nil {
                    return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
                }

                idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
                if err != nil {
                    return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
                }

                return idx, nil
            })
            if err != nil {
                return err
            }

            var ok bool
            if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
                return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
            }
        // Use proposer lookahead directly
        idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
        if err != nil {
            return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
        }

        if idx != dataColumn.ProposerIndex() {

@@ -626,7 +555,3 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {

    return sha256.Sum256(unhashedKey), nil
}

func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
    return string(root[:]) + fmt.Sprintf("%d", slot)
}

@@ -2,7 +2,6 @@ package verification

import (
    "reflect"
    "sync"
    "testing"
    "time"

@@ -795,87 +794,90 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
        blobCount = 1
    )

    parentRoot := [fieldparams.RootLength]byte{}
    columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
    firstColumn := columns[0]
    ctx := t.Context()
    testCases := []struct {
        name          string
        stateByRooter StateByRooter
        proposerCache proposerCache
        columns       []blocks.RODataColumn
        error         string
    }{
        {
            name:          "Cached, matches",
            stateByRooter: nil,
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
            },
            columns: columns,
        },
        {
            name:          "Cached, does not match",
            stateByRooter: nil,
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
            },
            columns: columns,
            error:   errSidecarUnexpectedProposer.Error(),
        },
        {
            name:          "Not cached, state lookup failure",
            stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
            },
            columns: columns,
            error:   "verifying state",
        },
    }
    parentRoot := [fieldparams.RootLength]byte{}

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            initializer := Initializer{
                shared: &sharedResources{
                    sr:  tc.stateByRooter,
                    pc:  tc.proposerCache,
                    hsp: &mockHeadStateProvider{},
                    fc: &mockForkchoicer{
                        TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
                    },
    // Create a Fulu state to get the expected proposer from the lookahead.
    fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
    expectedProposer, err := fuluState.ProposerLookahead()
    require.NoError(t, err)
    expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])

    // Generate data columns with the expected proposer index.
    matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
    // Generate data columns with wrong proposer index.
    wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)

    t.Run("Proposer matches", func(t *testing.T) {
        initializer := Initializer{
            shared: &sharedResources{
                sr: sbrReturnsState(fuluState),
                hsp: &mockHeadStateProvider{
                    headRoot:          parentRoot[:],
                    headSlot:          columnSlot, // Same epoch so HeadStateReadOnly is used
                    headStateReadOnly: fuluState,
                },
            }
                fc: &mockForkchoicer{},
            },
        }

            verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
            var wg sync.WaitGroup
        verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
        err := verifier.SidecarProposerExpected(ctx)
        require.NoError(t, err)
        require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
    })

            var err1, err2 error
            wg.Go(func() {
                err1 = verifier.SidecarProposerExpected(ctx)
            })
            wg.Go(func() {
                err2 = verifier.SidecarProposerExpected(ctx)
            })
            wg.Wait()
    t.Run("Proposer does not match", func(t *testing.T) {
        initializer := Initializer{
            shared: &sharedResources{
                sr: sbrReturnsState(fuluState),
                hsp: &mockHeadStateProvider{
                    headRoot:          parentRoot[:],
                    headSlot:          columnSlot, // Same epoch so HeadStateReadOnly is used
                    headStateReadOnly: fuluState,
                },
                fc: &mockForkchoicer{},
            },
        }

            require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
        err := verifier.SidecarProposerExpected(ctx)
        require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
        require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
    })

            if len(tc.error) > 0 {
                require.ErrorContains(t, tc.error, err1)
                require.ErrorContains(t, tc.error, err2)
                require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
                return
            }
    t.Run("State lookup failure", func(t *testing.T) {
        columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
        initializer := Initializer{
            shared: &sharedResources{
                sr:  sbrNotFound(t, columns[0].ParentRoot()),
                hsp: &mockHeadStateProvider{},
                fc:  &mockForkchoicer{},
            },
        }

            require.NoError(t, err1)
            require.NoError(t, err2)
            require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
        verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
        err := verifier.SidecarProposerExpected(ctx)
        require.ErrorContains(t, "verifying state", err)
        require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
        require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
    })
}

        err := verifier.SidecarProposerExpected(ctx)
        require.NoError(t, err)
    })
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
    roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
    blobs := make([]kzg.Blob, 0, len(roBlobs))
    for i := range roBlobs {
        blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
    }

    cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
    roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
    require.NoError(t, err)

    return roDataColumnSidecars
}

func TestColumnRequirementSatisfaction(t *testing.T) {

@@ -922,12 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
    require.NoError(t, err)
}

func TestConcatRootSlot(t *testing.T) {
    root := [fieldparams.RootLength]byte{1, 2, 3}
    const slot = primitives.Slot(3210)

    const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"

    actual := concatRootSlot(root, slot)
    require.Equal(t, expected, actual)
}

changelog/SashaMalysehko_fix-return-after-check.md (new file)
@@ -0,0 +1,3 @@
## Fixed

- Fix missing return after version header check in SubmitAttesterSlashingsV2.
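A minimal sketch of the bug class this entry describes, with an illustrative handler (the real Prysm handler and helper names differ): once a failed version-header check has written an error response, the handler must return immediately instead of falling through and processing the request anyway.

```go
package apisketch

import "net/http"

// submitSlashingsV2 is a hypothetical handler illustrating the missing-return pattern.
func submitSlashingsV2(w http.ResponseWriter, r *http.Request) {
	version := r.Header.Get("Eth-Consensus-Version")
	if version == "" {
		http.Error(w, "missing Eth-Consensus-Version header", http.StatusBadRequest)
		return // The fix: without this return, the handler keeps going with an invalid request.
	}
	w.WriteHeader(http.StatusOK) // Placeholder for the real decode-and-broadcast logic.
}
```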
changelog/avoid_kzg_send_after_context_cancel.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs.
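A hedged sketch of the guard described here, using hypothetical names (the actual batch verifier types differ): check the caller's context before queueing, and keep the send select-based so it can never block past cancellation.

```go
package kzgsketch

import "context"

// kzgJob stands in for a batch-verification request; the real type differs.
type kzgJob struct {
	commitment []byte
	result     chan error
}

// enqueue queues a job on the batch verifier unless the caller's context is
// already done, and never blocks past cancellation.
func enqueue(ctx context.Context, jobs chan<- kzgJob, j kzgJob) error {
	if err := ctx.Err(); err != nil {
		return err // Caller already gave up: avoid useless queueing.
	}
	select {
	case jobs <- j:
		return nil
	case <-ctx.Done():
		return ctx.Err() // Queue full and caller canceled: bail out instead of hanging.
	}
}
```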
changelog/bastin_fix-lcp2p-bug.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.
changelog/builder-index.md (new file)
@@ -0,0 +1,3 @@
### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.
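A sketch of what such a wrapper typically looks like; the method shown is an assumption for illustration, not the actual `primitives.BuilderIndex` API, which also satisfies Prysm's SSZ interfaces.

```go
package sketch

import "encoding/binary"

// BuilderIndex is a typed uint64 index into a builder registry (sketch only).
type BuilderIndex uint64

// MarshalSSZ encodes the index as an 8-byte little-endian value, which is how
// SSZ serializes uint64 fields.
func (b BuilderIndex) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(b))
	return buf, nil
}
```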
changelog/fix_kzg_batch_verifier_timeout_deadlock.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fix deadlock in data column gossip KZG batch verification when a caller times out preventing result delivery.
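The result-delivery side of the same problem, as a hedged sketch with hypothetical names: the batch loop must not block forever on a result channel whose reader has already timed out, so the send is paired with the requester's context.

```go
package kzgsketch

import "context"

// deliver sends a verification result back to the requester without
// deadlocking the batch loop if the requester has already timed out.
func deliver(ctx context.Context, out chan<- error, verr error) {
	select {
	case out <- verr:
		// Result delivered to a live requester.
	case <-ctx.Done():
		// Requester gave up; drop the result instead of blocking the batch loop.
	}
}
```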
changelog/james-prysm_align-atter-pool-apis.md (new file)
@@ -0,0 +1,3 @@
### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API now also broadcasts immediately, matching the gRPC flow.
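A hedged sketch of the syncing guard described above (the handler, checker interface, and message are illustrative, not the actual Prysm code): pool submission handlers reject requests with 503 while initial sync is in progress, and only then proceed to broadcast.

```go
package apisketch

import "net/http"

// syncChecker reports whether the node is still in initial sync.
type syncChecker interface {
	Syncing() bool
}

// requireSynced writes a 503 and returns false when the node is still syncing,
// so pool submission handlers can bail out before touching the pool.
func requireSynced(w http.ResponseWriter, s syncChecker) bool {
	if s.Syncing() {
		http.Error(w, "beacon node is currently syncing", http.StatusServiceUnavailable)
		return false
	}
	return true
}
```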
changelog/james-prysm_fix-rest-replay-state.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fixed a state replay issue in the REST API caused by the attester and sync committee duties endpoints.
Some files were not shown because too many files have changed in this diff.