Compare commits

..

29 Commits

Author SHA1 Message Date
Aarsh Shah
7c5ab84f78 docs and suggestions 2026-01-27 15:58:57 +04:00
Marco Munizaga
e7c99085cf add todo 2026-01-26 17:48:23 -05:00
Marco Munizaga
c76be28115 return valid if there are no datacolumns to validate 2026-01-26 17:47:05 -05:00
Marco Munizaga
5de8b0f660 cache partial data column header by group ID 2026-01-26 16:21:18 -05:00
Marco Munizaga
a2da060021 Add partial message metrics 2026-01-26 16:21:18 -05:00
Marco Munizaga
db0c78a025 more context around errors 2026-01-21 19:52:32 -05:00
Marco Munizaga
bc275e5a76 fix test typo 2026-01-21 19:52:31 -05:00
Marco Munizaga
0924ba8bc8 eagerly push the partial data column header 2026-01-21 19:52:31 -05:00
Marco Munizaga
5d0f4c76ad Include the version byte in the group ID 2026-01-21 14:31:38 -05:00
Marco Munizaga
370d34a68a add partial data column header 2026-01-21 14:02:39 -05:00
Marco Munizaga
84e592f45c Update go-libp2p-pubsub 2026-01-21 13:32:36 -05:00
Marco Munizaga
592b260950 add todo 2025-12-19 09:58:04 -08:00
Marco Munizaga
7e12f45d3a add partial-data-columns flag 2025-12-19 09:58:04 -08:00
Marco Munizaga
a7be4e6eb4 beacon-chain/sync: subscribe to partial columns 2025-12-19 09:58:04 -08:00
Marco Munizaga
ef7021132e publish partial columns when proposing a block 2025-12-19 09:58:04 -08:00
Marco Munizaga
d39ba8801c beacon-chain/execution: return partial columns and use getBlobsV3 if available
debug
2025-12-19 09:58:04 -08:00
Marco Munizaga
4282ce2277 core/peerdas: Add PartialColumns helper 2025-12-19 09:58:04 -08:00
Marco Munizaga
8c33a1d54b core/peerdas: support partial responses 2025-12-19 09:58:04 -08:00
Marco Munizaga
d01a2c97aa beacon-chain/p2p: own and start PartialColumnBroadcaster 2025-12-19 09:58:04 -08:00
Marco Munizaga
b5cec39191 Add metrics for gossipsub message sizes 2025-12-19 09:58:04 -08:00
Marco Munizaga
dbda80919e Implement PartialColumnBroadcaster 2025-12-19 09:58:03 -08:00
Marco Munizaga
ce421f0534 refactor DataColumn Cell KZG Proof verification 2025-12-17 15:54:04 -08:00
Marco Munizaga
eafbcdcbd7 avoid needless copy 2025-12-17 15:54:04 -08:00
Marco Munizaga
090cbbdb94 Add PartialDataColumn type 2025-12-17 15:54:03 -08:00
Marco Munizaga
d8bc2aa754 proto: Add PartialDataColumnSidecar 2025-12-17 15:54:03 -08:00
Marco Munizaga
27ab90eed2 clone slice in testing util 2025-12-17 15:54:03 -08:00
Marco Munizaga
e3ed9dc79d logrusadapter for slog 2025-12-17 15:54:03 -08:00
Marco Munizaga
1d2f0a7e0a fix multiaddr comparison 2025-12-17 15:54:03 -08:00
Marco Munizaga
6caa29da1f deps: update libp2p deps
for partial message support and simnet support
2025-12-17 15:54:03 -08:00
168 changed files with 4262 additions and 5160 deletions

View File

@@ -193,7 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

View File

@@ -48,7 +48,6 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -66,7 +65,6 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -148,8 +146,6 @@ go_test(
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

View File

@@ -323,17 +323,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
@@ -341,7 +338,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch && !fuluAndNextEpoch {
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {

View File

@@ -1053,3 +1053,40 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}
func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

View File

@@ -12,7 +12,6 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -54,53 +53,58 @@ type fcuConfig struct {
}
// sendFCU handles the logic to notify the engine of a forkchoice update
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
if fcuArgs.attributes.IsEmpty() {
return
}
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being
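With the diff markers stripped, the old and new versions of sendFCU are interleaved above. As a reading aid only, here is a condensed sketch of the new control flow in Go; fcuConfig, Service and the helper methods below are simplified stand-ins introduced for illustration, not the real Prysm types.

package sketch

import "fmt"

// Simplified stand-ins for the types in the diff above; not the real Prysm API.
type fcuConfig struct {
	headRoot      [32]byte
	hasAttributes bool // stands in for attributes != nil && !attributes.IsEmpty()
}

type Service struct{}

// Stand-in helpers: isNewHead and shouldOverrideFCU consult forkchoice in the
// real code; forkchoiceUpdateWithExecution takes the forkchoice lock, notifies
// the engine, saves the head and prunes attestations.
func (s *Service) isNewHead(root [32]byte) bool                        { return true }
func (s *Service) shouldOverrideFCU(root [32]byte) bool                { return false }
func (s *Service) forkchoiceUpdateWithExecution(args *fcuConfig) error { return nil }

// sendFCU, condensed: skip when the head did not change, skip when a late-block
// FCU with attributes was already sent, otherwise call the engine and surface
// the error instead of only logging it.
func (s *Service) sendFCU(args *fcuConfig) error {
	if !s.isNewHead(args.headRoot) {
		return nil
	}
	if args.hasAttributes && s.shouldOverrideFCU(args.headRoot) {
		return nil
	}
	if err := s.forkchoiceUpdateWithExecution(args); err != nil {
		return fmt.Errorf("could not notify forkchoice update: %w", err)
	}
	return nil
}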

View File

@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
}
func TestShouldOverrideFCU(t *testing.T) {

View File

@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
copy(ckzgCells[i][:], cells[i][:])
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
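A short aside on the one-line change above: when two named array types share the same length and element type, a direct conversion copies the whole value in a single expression, which is what replaces the explicit byte-wise copy here. The types in this sketch are hypothetical stand-ins, assumed (like ckzg4844.Cell) to be fixed-size byte arrays of equal length.

package sketch

// Hypothetical stand-ins: both are assumed to be fixed-size byte arrays of the
// same length, as the direct conversion in the diff requires.
type consensusCell [2048]byte // stand-in for the caller-side cell type
type ckzgCell [2048]byte      // stand-in for ckzg4844.Cell

func toCKZGCells(cells []consensusCell) []ckzgCell {
	out := make([]ckzgCell, len(cells))
	for i := range cells {
		// Identical underlying array types convert directly; this copies the
		// bytes just like copy(out[i][:], cells[i][:]) did.
		out[i] = ckzgCell(cells[i])
	}
	return out
}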

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -14,7 +13,6 @@ import (
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -138,14 +136,6 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
}
// WithExecProofsPool to keep track of execution proofs.
func WithExecProofsPool(p execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.ExecProofsPool = p
return nil
}
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Accessor) Option {
return func(s *Service) error {
@@ -276,10 +266,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}

View File

@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,13 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}
@@ -45,7 +43,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
// If the head state alone is enough, we can return it directly read only.
if c.Epoch <= headEpoch {
if c.Epoch == headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
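As a reading aid for the reshaped check above, here is a condensed, hypothetical predicate for when the head state can serve as the pre-state of a target checkpoint: the checkpoint must sit in the head's epoch (and not at genesis), and both the head and the checkpoint root must resolve to the same dependent root one epoch back. The types and the dependentRoot parameter are simplified stand-ins for the forkchoice API, not the real implementation.

package sketch

type Epoch uint64
type Root [32]byte

// headStateUsable is a stand-in for the gate in getRecentPreState above;
// dependentRoot abstracts ForkChoiceStore.DependentRootForEpoch.
func headStateUsable(
	headEpoch, cpEpoch Epoch,
	headRoot, cpRoot Root,
	dependentRoot func(Root, Epoch) (Root, error),
) bool {
	// Checkpoints older than the head epoch, or at genesis, are not served
	// from the head state.
	if cpEpoch < headEpoch || cpEpoch == 0 {
		return false
	}
	headDep, err := dependentRoot(headRoot, cpEpoch-1)
	if err != nil {
		return false
	}
	cpDep, err := dependentRoot(cpRoot, cpEpoch-1)
	if err != nil {
		return false
	}
	// Same dependent root and same epoch: the head state alone is enough.
	return headDep == cpDep && cpEpoch == headEpoch
}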

View File

@@ -170,13 +170,12 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -198,13 +197,12 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -229,7 +227,6 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -238,9 +235,8 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service.head = &head{
root: [32]byte{'T'},
block: headBlock,
slot: 64,
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -267,7 +263,6 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -275,8 +270,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'U'},
block: headBlock,
root: [32]byte{'T'},
state: s,
slot: 64,
}
@@ -293,13 +287,12 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

View File

@@ -7,8 +7,6 @@ import (
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -17,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -69,6 +66,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}
if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -105,16 +105,12 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
s.sendFCU(cfg, fcuArgs)
// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
return nil
@@ -299,6 +295,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
@@ -326,7 +330,6 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}
// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -356,9 +359,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -371,7 +372,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -665,17 +666,10 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}
root, blockVersion := roBlock.Root(), roBlock.Version()
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
if err := s.areExecutionProofsAvailable(ctx, roBlock); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}
return nil
return s.areDataColumnsAvailable(ctx, root, block)
}
if blockVersion >= version.Deneb {
@@ -685,70 +679,6 @@ func (s *Service) isDataAvailable(
return nil
}
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, roBlock consensusblocks.ROBlock) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if the generation proof is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
return nil
}
root, slot := roBlock.Root(), roBlock.Block().Slot()
requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"requiredProofCount": requiredProofCount,
})
// Subscribe to execution proof received events.
eventsChan := make(chan *feed.Event, 1)
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
defer subscription.Unsubscribe()
// Return early if we already have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(root)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}
// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case event := <-eventsChan:
if event.Type != operation.ExecutionProofReceived {
continue
}
proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
if !ok {
log.Error("Could not cast event data to ExecutionProofReceivedData")
continue
}
proof := proofWrapper.ExecutionProof
// Skip if the proof is for a different block.
if bytesutil.ToBytes32(proof.BlockRoot) != root {
continue
}
// Return if we have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(root)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
@@ -982,6 +912,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -994,32 +926,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -1049,8 +963,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
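The lateBlockTasks hunk above interleaves the removed pre/post-Fulu branches with their unified replacement, which makes it hard to follow. Below is a condensed, non-authoritative sketch of the unified path with simplified stand-in helpers: take the forkchoice read lock up front, refresh the caches synchronously, then send the FCU with payload attributes.

package sketch

import "context"

type Service struct{}

// Stand-ins for the real service methods; not the real Prysm API.
func (s *Service) forkchoiceRLock()                                 {}
func (s *Service) forkchoiceRUnlock()                               {}
func (s *Service) inRegularSync() bool                              { return true }
func (s *Service) updateNextSlotCache(ctx context.Context) error    { return nil }
func (s *Service) handleEpochBoundary(ctx context.Context) error    { return nil }
func (s *Service) notifyForkchoiceUpdate(ctx context.Context) error { return nil }

// lateBlockTasks, condensed: a single synchronous path replaces the
// pre/post-Fulu split removed above.
func (s *Service) lateBlockTasks(ctx context.Context) {
	s.forkchoiceRLock()
	defer s.forkchoiceRUnlock()
	if !s.inRegularSync() {
		return
	}
	// Refresh the next-slot and epoch-boundary caches before asking the engine
	// for a payload; errors are only logged in the real code.
	_ = s.updateNextSlotCache(ctx)
	_ = s.handleEpochBoundary(ctx)
	// Finally send FCU with payload attributes for the upcoming proposal.
	_ = s.notifyForkchoiceUpdate(ctx)
}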

View File

@@ -42,8 +42,14 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
}
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -167,19 +173,26 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
log.WithError(err).Error("Could not update next slot state cache")
return
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return
return nil
}
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -189,6 +202,20 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}
// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
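To summarize the early/late split described in the comments above, here is a condensed, non-authoritative sketch with simplified stand-in types: blocks arriving after the attestation voting window compute payload attributes inline before the first FCU, while early blocks get an empty-attribute FCU first and an attribute-bearing one later from a background routine.

package sketch

type fcuConfig struct{ hasAttributes bool }

type Service struct{}

// Stand-in predicates and helpers; not the real Prysm API.
func (s *Service) inRegularSync() bool                 { return true }
func (s *Service) withinVotingWindow(slot uint64) bool { return false }

// computePayloadAttributes stands in for updating the next-slot/epoch caches
// (when the new block is head) and filling in the payload attributes.
func (s *Service) computePayloadAttributes(args *fcuConfig) error {
	args.hasAttributes = true
	return nil
}

// getFCUArgs mirrors the early/late split: attributes are computed inline only
// for blocks that arrive after the attestation voting window.
func (s *Service) getFCUArgs(slot uint64, args *fcuConfig) error {
	if !s.inRegularSync() {
		return nil
	}
	if s.withinVotingWindow(slot) {
		return nil // attributes are filled in later by handleSecondFCUCall
	}
	return s.computePayloadAttributes(args)
}

// handleSecondFCUCall sends the attribute-bearing FCU in the background when
// the first one went out empty; the real code also requires the new block to
// be the current head.
func (s *Service) handleSecondFCUCall(args *fcuConfig, sendWithAttributes func(*fcuConfig)) {
	if !args.hasAttributes {
		go sendWithAttributes(args)
	}
}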

View File

@@ -13,8 +13,6 @@ import (
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -740,9 +738,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -792,9 +788,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -822,9 +816,25 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -856,9 +866,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1331,9 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1345,9 +1351,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1359,9 +1363,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1373,9 +1375,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1400,6 +1400,197 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}
// See the description in #10777 and #10782 for the full setup.
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")
for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)
// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's head root is the previous head (since the invalid block did
// not finish importing), and the node is optimistic.
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}
// See the description in #10777 and #10782 for the full setup.
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
@@ -1451,9 +1642,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1473,9 +1662,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
@@ -1496,9 +1684,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1521,9 +1708,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1533,10 +1718,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1587,9 +1768,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1656,9 +1835,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1679,9 +1856,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1701,9 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1732,9 +1906,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1802,9 +1975,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1829,9 +2000,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1859,9 +2028,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1905,6 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
@@ -1921,9 +2089,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1943,9 +2109,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1965,9 +2130,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1998,9 +2161,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -2121,9 +2282,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2189,9 +2348,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2474,10 +2631,7 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -2578,9 +2732,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
@@ -2614,10 +2766,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, "context canceled", err)
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -3003,113 +3152,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NotNil(t, err)
})
t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)
// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
testParams := testIsAvailableParams{
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// Insert MinProofsRequired execution proofs into the pool
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
}
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)
// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
testParams := testIsAvailableParams{
options: []Option{
WithOperationNotifier(&mock.MockOperationNotifier{}),
},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// Goroutine to send execution proofs after data columns are available
go func() {
// Wait a bit to simulate async proof arrival
time.Sleep(50 * time.Millisecond)
// Send ExecutionProofReceived events
opfeed := service.cfg.OperationNotifier.OperationFeed()
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
opfeed.Send(&feed.Event{
Type: operation.ExecutionProofReceived,
Data: &operation.ExecutionProofReceivedData{
ExecutionProof: proof,
},
})
}
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
}
// Test_postBlockProcess_EventSending tests that block processed events are only sent
@@ -3220,9 +3262,7 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
}
// Execute postBlockProcess
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(cfg)
service.cfg.ForkChoiceStore.Unlock()
// Check error expectation
if tt.expectError {

View File

@@ -66,54 +66,52 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
continue
}
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
}()
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
@@ -158,15 +156,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -117,9 +117,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -179,9 +177,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -25,7 +24,6 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -87,11 +85,9 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
ExecProofsPool execproofs.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
OperationNotifier operation.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
@@ -215,9 +211,7 @@ func (s *Service) Start() {
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
go s.spawnProcessAttestationsRoutine()
go s.spawnFinalizedProofsPruningRoutine()
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
}
@@ -573,46 +567,3 @@ func fuluForkSlot() (primitives.Slot, error) {
return forkFuluSlot, nil
}
// spawnFinalizedProofsPruningRoutine prunes execution proofs pool on every epoch.
// It removes proofs older than the finalized checkpoint to prevent unbounded
// memory growth.
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
func (s *Service) spawnFinalizedProofsPruningRoutine() {
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
defer ticker.Done()
for {
select {
case slot := <-ticker.C():
// Only prune at the start of each epoch
if !slots.IsEpochStart(slot) {
continue
}
finalizedCheckpoint := s.FinalizedCheckpt()
if finalizedCheckpoint == nil {
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
continue
}
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
log.WithError(err).Error("Could not get finalized slot")
continue
}
// Prune proofs older than finalized slot
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
log.WithFields(logrus.Fields{
"prunedCount": count,
"finalizedSlot": finalizedSlot,
}).Debug("Pruned finalized execution proofs")
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -290,3 +290,52 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}

View File

@@ -122,6 +122,24 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return nil
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex primitives.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}
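// A minimal sketch (assumed caller code, not part of this change) showing how the new batch
// is meant to be consumed: join it with other signature batches and verify everything in a
// single BLS batch verification, mirroring what the transition package now does with
// set.Join(bSet).Join(rSet).Join(aSet).
func verifyBlockSignatureBatched(st state.ReadOnlyBeaconState, proposerIndex primitives.ValidatorIndex, sig []byte, rootFunc func() ([32]byte, error)) (bool, error) {
	bSet, err := BlockSignatureBatch(st, proposerIndex, sig, rootFunc)
	if err != nil {
		return false, err
	}
	set := bls.NewSet()
	set.Join(bSet) // additional batches (randao, attestations, ...) would be joined here
	return set.Verify()
}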
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureBatch(

View File

@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
if bal > 0 {
continue
}

View File

@@ -46,9 +46,6 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionProofReceived is sent after a execution proof object has been received from gossip or rpc.
ExecutionProofReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,11 +77,6 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *ethpb.ExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

View File

@@ -33,6 +33,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -1,11 +1,15 @@
package peerdas
import (
stderrors "errors"
"iter"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/container/trie"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/pkg/errors"
)
@@ -16,6 +20,7 @@ var (
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
ErrNoKzgCommitments = errors.New("no KZG commitments found")
ErrMismatchLength = errors.New("mismatch in the length of the column, commitments or proofs")
ErrEmptySegment = errors.New("empty segment in batch")
ErrInvalidKZGProof = errors.New("invalid KZG proof")
ErrBadRootLength = errors.New("bad root length")
ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
@@ -57,80 +62,122 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
return nil
}
// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
// CellProofBundleSegment is returned when a batch fails. The caller can call
// the `.Verify` method to verify just this segment.
type CellProofBundleSegment struct {
indices []uint64 // column index (i.e. in [0, 127]) for each cell
commitments []kzg.Bytes48 // KZG commitment for the blob that contains the cell that needs to be verified
cells []kzg.Cell // actual cell data for each cell (2KB each)
proofs []kzg.Bytes48 // Cell proof for each cell
// Note: All the above arrays are of the same length.
}
// Verify verifies this segment without batching.
func (s CellProofBundleSegment) Verify() error {
if len(s.cells) == 0 {
return ErrEmptySegment
}
verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
if err != nil {
return stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
}
return nil
}
func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
// BatchVerifyDataColumnsCellsKZGProofs also returns the failed segments when batch verification fails,
// so callers can verify each segment separately to identify the failing cell (batch verification is all or nothing).
// We ignore the failed segment list here since we pass in only a single segment.
_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
return err
}
// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
// Note: We are slightly deviating from the specification here:
// The specification verifies the KZG proofs for each sidecar separately,
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// efficient when verifying in batch. If the batch fails, the failed segments
// are returned to the caller so that they may try segment by segment without
// batching. On success the failed segment list is empty.
//
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
// Compute the total count.
count := 0
for _, sidecar := range sidecars {
count += len(sidecar.Column)
}
// sizeHint is the total number of cells to verify across all sidecars.
func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
commitments := make([]kzg.Bytes48, 0, sizeHint)
indices := make([]uint64, 0, sizeHint)
cells := make([]kzg.Cell, 0, sizeHint)
proofs := make([]kzg.Bytes48, 0, sizeHint)
commitments := make([]kzg.Bytes48, 0, count)
indices := make([]uint64, 0, count)
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for _, sidecar := range sidecars {
for i := range sidecar.Column {
var anySegmentEmpty bool
var segments []CellProofBundleSegment
for _, cellProofsIter := range cellProofsIters {
startIdx := len(cells) // offset of this segment in the flattened slices: 0 for the first segment, then the running cell count
for bundle := range cellProofsIter {
var (
commitment kzg.Bytes48
cell kzg.Cell
proof kzg.Bytes48
)
commitmentBytes := sidecar.KzgCommitments[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
len(proofBytes) != len(proof) {
return ErrMismatchLength
// sanity check that each commitment is 48 bytes, each cell is 2KB and each proof is 48 bytes.
if len(bundle.Commitment) != len(commitment) ||
len(bundle.Cell) != len(cell) ||
len(bundle.Proof) != len(proof) {
return nil, ErrMismatchLength
}
copy(commitment[:], commitmentBytes)
copy(cell[:], cellBytes)
copy(proof[:], proofBytes)
copy(commitment[:], bundle.Commitment)
copy(cell[:], bundle.Cell)
copy(proof[:], bundle.Proof)
commitments = append(commitments, commitment)
indices = append(indices, sidecar.Index)
indices = append(indices, bundle.ColumnIndex)
cells = append(cells, cell)
proofs = append(proofs, proof)
}
if len(cells[startIdx:]) == 0 {
// The current iterator yielded no bundles, so this segment is empty: len(cells) did not
// grow past startIdx in the loop above.
anySegmentEmpty = true
}
segments = append(segments, CellProofBundleSegment{
indices: indices[startIdx:],
commitments: commitments[startIdx:],
cells: cells[startIdx:],
proofs: proofs[startIdx:],
})
}
if anySegmentEmpty {
// If any segment is empty, the batch is malformed and we verify nothing; we return all
// the segments so the caller can verify each segment separately.
return segments, ErrEmptySegment
}
// Batch verify that the cells match the corresponding commitments and proofs.
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
if err != nil {
return errors.Wrap(err, "verify cell KZG proof batch")
return segments, stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
return segments, ErrInvalidKZGProof
}
return nil
return nil, nil
}
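// A minimal sketch (assumed caller code, not part of this change) of the fallback pattern
// described above: verify everything in one batch first and, only if that fails, re-verify
// the returned segments one by one to pinpoint which input actually carries a bad proof.
func verifyBatchWithSegmentFallback(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) error {
	failedSegments, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, cellProofsIters)
	if err == nil {
		return nil
	}
	for i, segment := range failedSegments {
		if segErr := segment.Verify(); segErr != nil {
			return errors.Wrapf(segErr, "segment %d failed cell KZG proof verification", i)
		}
	}
	// Every segment verified on its own even though the batch failed; surface the batch error.
	return err
}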
// VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
root := sidecar.SignedBlockHeader.Header.BodyRoot
if len(root) != fieldparams.RootLength {
// verifyKzgCommitmentsInclusionProof is the shared implementation for inclusion proof verification.
func verifyKzgCommitmentsInclusionProof(bodyRoot []byte, kzgCommitments [][]byte, inclusionProof [][]byte) error {
if len(bodyRoot) != fieldparams.RootLength {
return ErrBadRootLength
}
leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
leaves := blocks.LeavesFromCommitments(kzgCommitments)
sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
if err != nil {
@@ -142,7 +189,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
return errors.Wrap(err, "hash tree root")
}
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
verified := trie.VerifyMerkleProof(bodyRoot, hashTreeRoot[:], kzgPosition, inclusionProof)
if !verified {
return ErrInvalidInclusionProof
}
@@ -150,6 +197,31 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
return nil
}
// VerifyDataColumnSidecarInclusionProof verifies that the given KZG commitments are included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
return verifyKzgCommitmentsInclusionProof(
sidecar.SignedBlockHeader.Header.BodyRoot,
sidecar.KzgCommitments,
sidecar.KzgCommitmentsInclusionProof,
)
}
// VerifyPartialDataColumnHeaderInclusionProof verifies if the KZG commitments are included in the beacon block.
func VerifyPartialDataColumnHeaderInclusionProof(header *ethpb.PartialDataColumnHeader) error {
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
return verifyKzgCommitmentsInclusionProof(
header.SignedBlockHeader.Header.BodyRoot,
header.KzgCommitments,
header.KzgCommitmentsInclusionProof,
)
}
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {

View File

@@ -3,6 +3,7 @@ package peerdas_test
import (
"crypto/rand"
"fmt"
"iter"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -72,7 +73,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
@@ -80,14 +81,15 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
})
t.Run("nominal", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
require.NoError(t, err)
require.Equal(t, 0, len(failedSegments))
})
}
@@ -273,7 +275,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
for _, sidecar := range sidecars {
sidecars := []blocks.RODataColumn{sidecar}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -308,7 +310,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -341,7 +343,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
for _, sidecars := range allSidecars {
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}

View File

@@ -5,6 +5,7 @@ import (
"sync"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -339,7 +340,10 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
}
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
// commitmentCount (i.e. the number of blobs) is required to return a correctly sized bitlist even when blobsAndProofs is nil.
// Returns the bitlist of which blobs are present, the cells for each blob that is present, and the proofs for each cell in each blob that is present.
// The returned cells and proofs are compacted and will not contain entries for missing blobs.
func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (bitfield.Bitlist /* parts included */, [][]kzg.Cell, [][]kzg.Proof, error) {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -347,16 +351,27 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
var wg errgroup.Group
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
var blobsPresent int
for _, blobAndProof := range blobsAndProofs {
if blobAndProof != nil {
blobsPresent++
}
}
cellsPerBlob := make([][]kzg.Cell, blobsPresent) // array of cells for each blob
proofsPerBlob := make([][]kzg.Proof, blobsPresent) // array of proofs for each blob
included := bitfield.NewBitlist(commitmentCount) // bitlist of which blobs are present
var j int
for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
continue
}
included.SetBitAt(uint64(i), true) // blob at index i is present
compactIndex := j // compact index is the index of the blob in the returned arrays after accounting for nil/missing blobs.
wg.Go(func() error {
var kzgBlob kzg.Blob
// the number of copied bytes should be equal to the expected size of a blob.
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}
@@ -381,17 +396,18 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
kzgProofs = append(kzgProofs, kzgProof)
}
cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
cellsPerBlob[compactIndex] = cells
proofsPerBlob[compactIndex] = kzgProofs
return nil
})
j++
}
if err := wg.Wait(); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return cellsPerBlob, proofsPerBlob, nil
return included, cellsPerBlob, proofsPerBlob, nil
}
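// A minimal sketch (assumed caller code, not part of this change) of how the compacted result
// is indexed: "included" is keyed by commitment index, while cellsPerBlob and proofsPerBlob are
// keyed by a separate compact index that only advances for blobs the execution client returned.
func cellsByCommitmentIndex(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell) map[uint64][]kzg.Cell {
	byCommitment := make(map[uint64][]kzg.Cell, included.Count())
	compactIndex := 0
	for i := uint64(0); i < included.Len(); i++ {
		if !included.BitAt(i) {
			continue // blob i was not returned by the execution client
		}
		byCommitment[i] = cellsPerBlob[compactIndex]
		compactIndex++
	}
	return byCommitment
}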
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.

View File

@@ -479,8 +479,9 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
t.Run("nil blob and proof", func(t *testing.T) {
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
included, _, _, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
require.NoError(t, err)
require.Equal(t, uint64(0), included.Count())
})
t.Run("nominal", func(t *testing.T) {
@@ -533,7 +534,8 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
require.NoError(t, err)
// Test ComputeCellsAndProofs
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
included, actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
require.Equal(t, included.Count(), uint64(len(actualCellsPerBlob)))
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsPerBlob))

View File

@@ -3,6 +3,7 @@ package peerdas
import (
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -143,6 +144,51 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return roSidecars, nil
}
// PartialColumns builds one PartialDataColumn per column from the compacted per-blob cells and proofs.
// Note: callers should ensure that included.Count() (i.e. the number of bits set) equals uint64(len(cellsPerBlob));
// otherwise the "cells[idx] = cells[idx][1:]" advance below will go out of bounds.
func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.PartialDataColumn, error) {
start := time.Now()
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
// rotate the cells and proofs from being per blob (i.e. "row indexed") to being per column ("column indexed").
// The returned slices have numberOfColumns entries; the "included" bitfield is used below to skip blobs whose cells are not present in each column.
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
dc, err := blocks.NewPartialDataColumn(info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof)
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
// info.kzgCommitments holds the KZG commitment for each blob in the block, so its length is the number of blobs in the block.
// The included bitlist records which of those blobs we actually have the blob data for.
// We iterate over each row (blob) and extend this column one cell at a time for every blob that is present.
for i := range len(info.kzgCommitments) {
if !included.BitAt(uint64(i)) {
continue
}
// TODO: Add an explicit bounds check here. How do we know that cells always has "numberOfColumns" entries?
// This currently works because "rotateRowsToCols" above allocates "numberOfColumns" columns for both cells and proofs.
// The "included" bitlist filters out blobs whose cells are not present, so we only consume one cell per present blob
// in this call to "ExtendFromVerfifiedCell".
dc.ExtendFromVerfifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
cells[idx] = cells[idx][1:]
proofs[idx] = proofs[idx][1:]
}
dataColumns = append(dataColumns, dc)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return dataColumns, nil
}
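// A minimal sketch (an assumption, not the actual rotateRowsToCols helper referenced above) of
// the row-to-column rotation: cell c of blob b in the per-blob ("row indexed") layout becomes
// the next entry of column c in the per-column layout, so each column ends up holding one cell
// per included blob, in blob order.
func rotateRowsToColsSketch(cellsPerBlob [][]kzg.Cell, numberOfColumns uint64) [][]kzg.Cell {
	cellsPerColumn := make([][]kzg.Cell, numberOfColumns)
	for _, blobCells := range cellsPerBlob {
		for c := uint64(0); c < numberOfColumns && c < uint64(len(blobCells)); c++ {
			cellsPerColumn[c] = append(cellsPerColumn[c], blobCells[c])
		}
	}
	return cellsPerColumn
}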
// Slot returns the slot of the source
func (s *BlockReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()

View File

@@ -182,6 +182,12 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}
sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -195,7 +201,7 @@ func ProcessBlockNoVerifyAnySig(
// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(rSet).Join(aSet)
set.Join(bSet).Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

View File

@@ -157,8 +157,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
}
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {

View File

@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
if oldestSlotFlagPtr != nil {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).

View File

@@ -128,9 +128,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }
@@ -144,7 +141,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",
@@ -178,13 +174,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: &denebSlot,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
@@ -214,9 +210,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot
@@ -239,27 +232,16 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)
// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())
// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)

View File

@@ -38,7 +38,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -25,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/spf13/afero"
"golang.org/x/sync/errgroup"
)
const (
@@ -186,162 +185,73 @@ func (dcs *DataColumnStorage) WarmCache() {
highestStoredEpoch := primitives.Epoch(0)
// List all period directories
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
if err != nil {
log.WithError(err).Error("Error reading top directory during warm cache")
return
}
// Iterate through periods
for _, periodFileInfo := range periodFileInfos {
if !periodFileInfo.IsDir() {
continue
// Walk the data column filesystem to warm up the cache.
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
if fileErr != nil {
return fileErr
}
periodPath := periodFileInfo.Name()
// If not a leaf, skip.
if info.IsDir() {
return nil
}
// List all epoch directories in this period
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
// Extract metadata from the file path.
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
continue
log.WithError(err).Error("Error encountered while extracting file metadata")
return nil
}
// Iterate through epochs
for _, epochFileInfo := range epochFileInfos {
if !epochFileInfo.IsDir() {
continue
}
epochPath := path.Join(periodPath, epochFileInfo.Name())
// List all .sszs files in this epoch
files, err := listEpochFiles(dcs.fs, epochPath)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
continue
}
if len(files) == 0 {
continue
}
// Process all files in this epoch in parallel
epochHighest, err := dcs.processEpochFiles(files)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
}
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
// Open the data column filesystem file.
f, err := dcs.fs.Open(path)
if err != nil {
log.WithError(err).Error("Error encountered while opening data column filesystem file")
return nil
}
// Close the file.
defer func() {
// Overwrite the existing error only if it is nil, since the close error is less important.
closeErr := f.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Read the metadata of the file.
metadata, err := dcs.metadata(f)
if err != nil {
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
return nil
}
// Check the indices.
indices := metadata.indices.all()
if len(indices) == 0 {
return nil
}
// Build the ident.
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
// Update the highest stored epoch.
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
// Set the ident in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
}
return nil
}); err != nil {
log.WithError(err).Error("Error encountered while walking data column filesystem.")
}
// Prune the cache and the filesystem
// Prune the cache and the filesystem.
dcs.prune()
totalElapsed := time.Since(start)
// Log summary
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
}
// listEpochFiles lists all .sszs files in an epoch directory.
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
fileInfos, err := afero.ReadDir(fs, epochPath)
if err != nil {
return nil, errors.Wrap(err, "read epoch directory")
}
files := make([]string, 0, len(fileInfos))
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}
fileName := fileInfo.Name()
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
files = append(files, path.Join(epochPath, fileName))
}
}
return files, nil
}
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
var (
eg errgroup.Group
mu sync.Mutex
)
highestEpoch := primitives.Epoch(0)
for _, filePath := range files {
eg.Go(func() error {
epoch, err := dcs.processFile(filePath)
if err != nil {
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
return nil
}
mu.Lock()
defer mu.Unlock()
highestEpoch = max(highestEpoch, epoch)
return nil
})
}
if err := eg.Wait(); err != nil {
return highestEpoch, err
}
return highestEpoch, nil
}
// processFile processes a single .sszs file.
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
// Extract metadata from the file path
fileMetadata, err := extractFileMetadata(filePath)
if err != nil {
return 0, errors.Wrap(err, "extract file metadata")
}
// Open the file (each goroutine gets its own FD)
f, err := dcs.fs.Open(filePath)
if err != nil {
return 0, errors.Wrap(err, "open file")
}
defer func() {
if closeErr := f.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
}
}()
// Read metadata
metadata, err := dcs.metadata(f)
if err != nil {
return 0, errors.Wrap(err, "read metadata")
}
// Extract indices
indices := metadata.indices.all()
if len(indices) == 0 {
return fileMetadata.epoch, nil // No indices, skip
}
// Build ident and set in cache (thread-safe)
dataColumnsIdent := DataColumnsIdent{
Root: fileMetadata.blockRoot,
Epoch: fileMetadata.epoch,
Indices: indices,
}
if err := dcs.cache.set(dataColumnsIdent); err != nil {
return 0, errors.Wrap(err, "cache set")
}
return fileMetadata.epoch, nil
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
}
// Summary returns the DataColumnStorageSummary.

View File

@@ -72,6 +72,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
@@ -57,6 +58,7 @@ var (
fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
GetBlobsV3,
}
)
@@ -98,6 +100,8 @@ const (
GetBlobsV1 = "engine_getBlobsV1"
// GetBlobsV2 request string for JSON-RPC.
GetBlobsV2 = "engine_getBlobsV2"
// GetBlobsV3 request string for JSON-RPC.
GetBlobsV3 = "engine_getBlobsV3"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
@@ -121,7 +125,7 @@ type Reconstructor interface {
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -548,6 +552,22 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
return result, handleRPCError(err)
}
func (s *Service) GetBlobsV3(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV3")
defer span.End()
start := time.Now()
if !s.capabilityCache.has(GetBlobsV3) {
return nil, errors.Errorf("%s is not supported", GetBlobsV3)
}
getBlobsV3RequestsTotal.Inc()
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV3, versionedHashes)
getBlobsV3Latency.Observe(time.Since(start).Seconds())
return result, handleRPCError(err)
}
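// A minimal sketch (assumed caller code, not part of this change) of the engine_getBlobsV3
// response shape relied on below: one entry per requested versioned hash, with nil entries
// for blobs the execution layer does not have.
func missingVersionedHashes(hashes []common.Hash, resp []*pb.BlobAndProofV2) []common.Hash {
	var missing []common.Hash
	for i, blobAndProof := range resp {
		if blobAndProof == nil && i < len(hashes) {
			missing = append(missing, hashes[i])
		}
	}
	return missing
}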
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -658,40 +678,51 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
root := populator.Root()
// Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
commitments, err := populator.Commitments()
if err != nil {
return nil, wrapWithBlockRoot(err, root, "commitments")
return nil, nil, wrapWithBlockRoot(err, root, "commitments")
}
cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
included, cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
log.Info("Received cells and proofs from execution client", "included", included, "cells count", len(cellsPerBlob), "err", err)
if err != nil {
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
return nil, nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
}
// Return early if nothing is returned from the EL.
if len(cellsPerBlob) == 0 {
return nil, nil
partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, populator)
haveAllBlobs := included.Count() == uint64(len(commitments))
log.Info("Constructed partial columns", "haveAllBlobs", haveAllBlobs)
// TODO: Check for err==nil before we go here.
if haveAllBlobs {
// Construct data column sidecars from the signed block and the cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
if err != nil {
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from column sidecar")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
return verifiedROSidecars, partialColumns, nil
}
// Construct data column sidecars from the signed block and the cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
if err != nil {
return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "partial columns from column sidecar")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
return verifiedROSidecars, nil
return nil, partialColumns, nil
}
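// A minimal sketch (assumed caller-side wiring, not part of this change) of consuming both
// return values: when the execution layer returned every blob, full verified sidecars are
// available and are broadcast alongside the partial columns; otherwise only the partial
// columns are. The columnBroadcaster interface below is a stand-in mirroring the updated
// BroadcastDataColumnSidecars signature used elsewhere in this change set.
type columnBroadcaster interface {
	BroadcastDataColumnSidecars(ctx context.Context, verified []blocks.VerifiedRODataColumn, partial []blocks.PartialDataColumn) error
}

func constructAndBroadcast(ctx context.Context, r Reconstructor, b columnBroadcaster, populator peerdas.ConstructionPopulator) error {
	verified, partial, err := r.ConstructDataColumnSidecars(ctx, populator)
	if err != nil {
		return err
	}
	// verified may be empty when only a subset of blobs was available from the execution layer.
	return b.BroadcastDataColumnSidecars(ctx, verified, partial)
}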
// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client, using the engine_getBlobsV3 execution API method when available and falling back to engine_getBlobsV2.
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
// The returned cells and proofs are compacted and will not contain entries for missing blobs.
// The returned bitlist records which blobs the execution client returned; it is used to map
// commitment indices to positions in the compacted cells and proofs slices.
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) (bitfield.Bitlist /* included parts */, [][]kzg.Cell, [][]kzg.Proof, error) {
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
@@ -699,24 +730,44 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi
versionedHashes = append(versionedHashes, versionedHash)
}
var blobAndProofs []*pb.BlobAndProofV2
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, nil, errors.Wrapf(err, "get blobs V2")
var err error
useV3 := s.capabilityCache.has(GetBlobsV3)
if useV3 {
// V3 can return a partial response; V2 is all or nothing.
blobAndProofs, err = s.GetBlobsV3(ctx, versionedHashes)
} else {
blobAndProofs, err = s.GetBlobsV2(ctx, versionedHashes)
}
// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
return nil, nil, nil
if err != nil {
return nil, nil, nil, errors.Wrapf(err, "get blobs V2/3")
}
// Track complete vs partial responses for V3
if useV3 && len(blobAndProofs) > 0 {
nonNilCount := 0
for _, bp := range blobAndProofs {
if bp != nil {
nonNilCount++
}
}
if nonNilCount == len(versionedHashes) {
getBlobsV3CompleteResponsesTotal.Inc()
} else if nonNilCount > 0 {
getBlobsV3PartialResponsesTotal.Inc()
}
}
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
included, cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(kzgCommitments)), blobAndProofs)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells and proofs")
return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
}
return cellsPerBlob, proofsPerBlob, nil
return included, cellsPerBlob, proofsPerBlob, nil
}
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.

View File

@@ -2587,7 +2587,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
ctx := context.Background()
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
_, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})
@@ -2598,7 +2598,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
@@ -2611,7 +2611,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})

View File

@@ -34,6 +34,25 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV3RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_requests_total",
Help: "Total number of engine_getBlobsV3 requests sent",
})
getBlobsV3CompleteResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_complete_responses_total",
Help: "Total number of complete engine_getBlobsV3 successful responses received",
})
getBlobsV3PartialResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_partial_responses_total",
Help: "Total number of engine_getBlobsV3 partial responses received",
})
getBlobsV3Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_engine_getBlobsV3_request_duration_seconds",
Help: "Duration of engine_getBlobsV3 requests in seconds",
Buckets: []float64{0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 4},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",

View File

@@ -118,8 +118,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
}
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
return e.DataColumnSidecars, nil, e.ErrorDataColumnSidecars
}
// GetTerminalBlockHash --

View File

@@ -37,7 +37,6 @@ go_library(
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",

View File

@@ -40,7 +40,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/node/registration"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -103,7 +102,6 @@ type BeaconNode struct {
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
execProofsPool execproofs.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
@@ -158,7 +156,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slashingsPool: slashings.NewPool(),
syncCommitteePool: synccommittee.NewPool(),
blsToExecPool: blstoexec.NewPool(),
execProofsPool: execproofs.NewPool(),
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
payloadIDCache: cache.NewPayloadIDCache(),
slasherBlockHeadersFeed: new(event.Feed),
@@ -671,6 +668,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
DB: b.db,
StateGen: b.stateGen,
ClockWaiter: b.ClockWaiter,
PartialDataColumns: b.cliCtx.Bool(flags.PartialDataColumns.Name),
})
if err != nil {
return err
@@ -740,7 +738,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithBLSToExecPool(b.blsToExecPool),
blockchain.WithExecProofsPool(b.execProofsPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithAttestationService(attService),
@@ -756,7 +753,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
blockchain.WithOperationNotifier(b),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -832,7 +828,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithSlashingPool(b.slashingsPool),
regularsync.WithSyncCommsPool(b.syncCommitteePool),
regularsync.WithBlsToExecPool(b.blsToExecPool),
regularsync.WithExecProofPool(b.execProofsPool),
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),

View File

@@ -1,16 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pool.go"],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
],
)

View File

@@ -1,174 +0,0 @@
package execproofs
import (
"fmt"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// ProofKey uniquely identifies an execution proof by block root and proof type.
type ProofKey struct {
Root [fieldparams.RootLength]byte
ProofId primitives.ExecutionProofId
}
// String returns a string representation for logging.
func (k ProofKey) String() string {
return fmt.Sprintf("root=%#x,proofId=%d", k.Root, k.ProofId)
}
var (
execProofInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
Name: "exec_proof_pool_total",
Help: "The number of execution proofs in the operation pool.",
})
)
var _ PoolManager = (*ExecProofPool)(nil)
// PoolManager maintains execution proofs received via gossip.
// These proofs are used for data availability checks when importing blocks.
// Lightweight verifier nodes need a minimum number of proofs from different zkVM types
// to verify block execution correctness.
type PoolManager interface {
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
Insert(executionProof *ethpb.ExecutionProof)
// Get returns a copy of all proofs for a specific block root
Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof
// Ids returns the list of (unique) proof types available for a specific block root
Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId
// Count counts the number of proofs for a specific block root
Count(blockRoot [fieldparams.RootLength]byte) uint64
// Exists checks if a proof exists for the given block root and proof ID
Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool
// PruneUpTo removes proofs older than the target slot
PruneUpTo(targetSlot primitives.Slot) int
}
// ExecProofPool is a concrete implementation of type ExecProofPoolManager.
type ExecProofPool struct {
lock sync.RWMutex
m map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof
}
// NewPool returns an initialized pool.
func NewPool() *ExecProofPool {
return &ExecProofPool{
m: make(map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof),
}
}
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
func (p *ExecProofPool) Insert(proof *ethpb.ExecutionProof) {
p.lock.Lock()
defer p.lock.Unlock()
blockRoot := bytesutil.ToBytes32(proof.BlockRoot)
// Create the inner map if it doesn't exist
if p.m[blockRoot] == nil {
p.m[blockRoot] = make(map[primitives.ExecutionProofId]*ethpb.ExecutionProof)
}
// Check if proof already exists
if _, exists := p.m[blockRoot][proof.ProofId]; exists {
return
}
// Insert new proof
p.m[blockRoot][proof.ProofId] = proof
execProofInPoolTotal.Inc()
}
// Get returns a copy of all proofs for a specific block root
func (p *ExecProofPool) Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof {
p.lock.RLock()
defer p.lock.RUnlock()
proofsByType, exists := p.m[blockRoot]
if !exists {
return nil
}
result := make([]*ethpb.ExecutionProof, 0, len(proofsByType))
for _, proof := range proofsByType {
result = append(result, proof.Copy())
}
return result
}
func (p *ExecProofPool) Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId {
p.lock.RLock()
defer p.lock.RUnlock()
proofById, exists := p.m[blockRoot]
if !exists {
return nil
}
ids := make([]primitives.ExecutionProofId, 0, len(proofById))
for id := range proofById {
ids = append(ids, id)
}
return ids
}
// Count counts the number of proofs for a specific block root
func (p *ExecProofPool) Count(blockRoot [fieldparams.RootLength]byte) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
return uint64(len(p.m[blockRoot]))
}
// Exists checks if a proof exists for the given block root and proof ID
func (p *ExecProofPool) Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool {
p.lock.RLock()
defer p.lock.RUnlock()
proofsByType, exists := p.m[blockRoot]
if !exists {
return false
}
_, exists = proofsByType[proofId]
return exists
}
// PruneUpTo removes proofs older than the given slot
func (p *ExecProofPool) PruneUpTo(targetSlot primitives.Slot) int {
p.lock.Lock()
defer p.lock.Unlock()
pruned := 0
for blockRoot, proofsByType := range p.m {
for proofId, proof := range proofsByType {
if proof.Slot < targetSlot {
delete(proofsByType, proofId)
execProofInPoolTotal.Dec()
pruned++
}
}
// Clean up empty inner maps
if len(proofsByType) == 0 {
delete(p.m, blockRoot)
}
}
return pruned
}

View File

@@ -51,6 +51,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
@@ -165,7 +166,6 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -342,7 +342,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
// cancelled (whichever comes first).
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error {
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
@@ -352,7 +352,7 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
return errors.Wrap(err, "current fork digest")
}
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars, partialColumns)
return nil
}
@@ -360,7 +360,7 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) {
type rootAndIndex struct {
root [fieldparams.RootLength]byte
index uint64
@@ -374,7 +374,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
logLevel := logrus.GetLevel()
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
for _, sidecar := range sidecars {
for i, sidecar := range sidecars {
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
wg.Go(func() {
@@ -398,6 +398,14 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
return
}
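// Eagerly publish the partial representation (header plus any cells already held locally) before broadcasting the full sidecar.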
if s.partialColumnBroadcaster != nil && i < len(partialColumns) {
fullTopicStr := topic + s.Encoding().ProtocolSuffix()
if err := s.partialColumnBroadcaster.Publish(fullTopicStr, partialColumns[i]); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot partial broadcast data column sidecar")
}
}
// Broadcast the data column sidecar to the network.
if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
tracing.AnnotateError(span, err)

View File

@@ -773,7 +773,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
time.Sleep(50 * time.Millisecond)
// Broadcast to peers and wait.
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar}, nil)
require.NoError(t, err)
// Receive the message.

View File

@@ -26,6 +26,7 @@ const (
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
PartialDataColumns bool
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool

View File

@@ -589,11 +589,6 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

View File

@@ -25,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -244,19 +243,12 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -281,15 +273,6 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}
// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -365,14 +348,6 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)
// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}
})
}
}

View File

@@ -52,9 +52,6 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -148,8 +145,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -515,28 +510,6 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: executionProofWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,

View File

@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -28,6 +29,7 @@ type (
Broadcaster
SetStreamHandler
PubSubProvider
PartialColumnBroadcasterProvider
PubSubTopicUser
SenderEncoder
PeerManager
@@ -52,7 +54,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -92,6 +94,11 @@ type (
PubSub() *pubsub.PubSub
}
// PubSubProvider provides the p2p pubsub protocol.
PartialColumnBroadcasterProvider interface {
PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster
}
// PeerManager abstracts some peer management methods from libp2p.
PeerManager interface {
Disconnect(peer.ID) error

View File
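As a usage note, a proposer-side caller is expected to thread both return values of the widened ConstructDataColumnSidecars signature into BroadcastDataColumnSidecars. A hedged sketch of that call pattern against the interfaces shown in this diff (the local interface declarations exist only to keep the example self-contained):

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// columnConstructor and columnBroadcaster mirror the two methods touched by
// this diff; real callers use the engine client and the p2p service.
type columnConstructor interface {
	ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
}

type columnBroadcaster interface {
	BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error
}

// publishColumns hands both the verified sidecars and the partial columns to
// the p2p layer, which eager-pushes the partial representation alongside the
// full sidecar broadcast.
func publishColumns(ctx context.Context, c columnConstructor, b columnBroadcaster, pop peerdas.ConstructionPopulator) error {
	sidecars, partialColumns, err := c.ConstructDataColumnSidecars(ctx, pop)
	if err != nil {
		return err
	}
	return b.BroadcastDataColumnSidecars(ctx, sidecars, partialColumns)
}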

@@ -157,6 +157,11 @@ var (
Help: "The number of publish messages received via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubRecvSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_recv_pub_size_total",
Help: "The total size of publish messages received via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_total",
Help: "The number of messages dropped via rpc for a particular control message",
@@ -171,6 +176,11 @@ var (
Help: "The number of publish messages dropped via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubDropSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_pub_size_total",
Help: "The total size of publish messages dropped via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_sent_total",
Help: "The number of messages sent via rpc for a particular control message",
@@ -185,6 +195,11 @@ var (
Help: "The number of publish messages sent via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubSentSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "gossipsub_pubsub_rpc_sent_pub_size_total",
Help: "The total size of publish messages sent via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
)
func (s *Service) updateMetrics() {

View File

@@ -0,0 +1,27 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"metrics.go",
"partial.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster",
visibility = ["//visibility:public"],
deps = [
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//internal/logrusadapter:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages/bitmap:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,25 @@
load("@prysm//tools/go:def.bzl", "go_test")
go_test(
name = "go_default_test",
size = "medium",
srcs = ["two_node_test.go"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//x/simlibp2p:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_marcopolo_simnet//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,238 @@
package integrationtest
import (
"context"
"crypto/rand"
"fmt"
"testing"
"testing/synctest"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
"github.com/marcopolo/simnet"
"github.com/sirupsen/logrus"
)
// TestTwoNodePartialColumnExchange tests that two nodes can exchange partial columns
// and reconstruct the complete column. Node 1 starts with the even-indexed cells and
// Node 2 with the odd-indexed cells; after the exchange, both should have all cells.
func TestTwoNodePartialColumnExchange(t *testing.T) {
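// synctest runs the body in an isolated bubble with a virtual clock, so the sleeps below advance simulated time rather than real wall-clock time.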
synctest.Test(t, func(t *testing.T) {
// Create a simulated libp2p network
latency := time.Millisecond * 10
network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
{LinkSettings: simnet.NodeBiDiLinkSettings{
Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
Uplink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
}, Count: 2},
}, simlibp2p.NetworkSettings{UseBlankHost: true})
require.NoError(t, err)
require.NoError(t, network.Start())
defer func() {
require.NoError(t, network.Close())
}()
defer func() {
for _, node := range meta.Nodes {
err := node.Close()
if err != nil {
panic(err)
}
}
}()
h1 := meta.Nodes[0]
h2 := meta.Nodes[1]
logger := logrus.New()
logger.SetLevel(logrus.DebugLevel)
broadcaster1 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
broadcaster2 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
opts1 := broadcaster1.AppendPubSubOpts([]pubsub.Option{
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
})
opts2 := broadcaster2.AppendPubSubOpts([]pubsub.Option{
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ps1, err := pubsub.NewGossipSub(ctx, h1, opts1...)
require.NoError(t, err)
ps2, err := pubsub.NewGossipSub(ctx, h2, opts2...)
require.NoError(t, err)
go broadcaster1.Start()
go broadcaster2.Start()
defer func() {
broadcaster1.Stop()
broadcaster2.Stop()
}()
// Generate Test Data
var blockRoot [fieldparams.RootLength]byte
copy(blockRoot[:], []byte("test-block-root"))
numCells := 6
commitments := make([][]byte, numCells)
cells := make([][]byte, numCells)
proofs := make([][]byte, numCells)
for i := range numCells {
commitments[i] = make([]byte, 48)
cells[i] = make([]byte, 2048)
rand.Read(cells[i])
proofs[i] = make([]byte, 48)
fmt.Appendf(proofs[i][:0], "proof %d", i)
}
roDC, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{
BodyRoot: blockRoot[:],
KzgCommitments: commitments,
Column: cells,
KzgProofs: proofs,
},
})
pc1, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
require.NoError(t, err)
pc2, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
require.NoError(t, err)
// Split data
for i := range numCells {
if i%2 == 0 {
pc1.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
} else {
pc2.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
}
}
// Setup Topic and Subscriptions
digest := params.ForkDigest(0)
columnIndex := uint64(12)
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topicStr := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) +
encoder.SszNetworkEncoder{}.ProtocolSuffix()
time.Sleep(100 * time.Millisecond)
topic1, err := ps1.Join(topicStr, pubsub.RequestPartialMessages())
require.NoError(t, err)
topic2, err := ps2.Join(topicStr, pubsub.RequestPartialMessages())
require.NoError(t, err)
// Header validator that verifies the inclusion proof
headerValidator := func(header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
if header == nil {
return false, fmt.Errorf("nil header")
}
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return true, fmt.Errorf("nil signed block header")
}
if len(header.KzgCommitments) == 0 {
return true, fmt.Errorf("empty kzg commitments")
}
// Verify inclusion proof
if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
return true, fmt.Errorf("invalid inclusion proof: %w", err)
}
t.Log("Header validation passed")
return false, nil
}
cellValidator := func(_ []blocks.CellProofBundle) error {
return nil
}
node1Complete := make(chan blocks.VerifiedRODataColumn, 1)
node2Complete := make(chan blocks.VerifiedRODataColumn, 1)
handler1 := func(topic string, col blocks.VerifiedRODataColumn) {
t.Logf("Node 1: Completed! Column has %d cells", len(col.Column))
node1Complete <- col
}
handler2 := func(topic string, col blocks.VerifiedRODataColumn) {
t.Logf("Node 2: Completed! Column has %d cells", len(col.Column))
node2Complete <- col
}
// Connect hosts
err = h1.Connect(context.Background(), peer.AddrInfo{
ID: h2.ID(),
Addrs: h2.Addrs(),
})
require.NoError(t, err)
time.Sleep(300 * time.Millisecond)
// Subscribe to regular GossipSub (critical for partial message RPC exchange!)
sub1, err := topic1.Subscribe()
require.NoError(t, err)
defer sub1.Cancel()
sub2, err := topic2.Subscribe()
require.NoError(t, err)
defer sub2.Cancel()
err = broadcaster1.Subscribe(topic1, headerValidator, cellValidator, handler1)
require.NoError(t, err)
err = broadcaster2.Subscribe(topic2, headerValidator, cellValidator, handler2)
require.NoError(t, err)
// Wait for mesh to form
time.Sleep(2 * time.Second)
// Publish
t.Log("Publishing from Node 1")
err = broadcaster1.Publish(topicStr, pc1)
require.NoError(t, err)
time.Sleep(200 * time.Millisecond)
t.Log("Publishing from Node 2")
err = broadcaster2.Publish(topicStr, pc2)
require.NoError(t, err)
// Wait for Completion
timeout := time.After(10 * time.Second)
var col1, col2 blocks.VerifiedRODataColumn
receivedCount := 0
for receivedCount < 2 {
select {
case col1 = <-node1Complete:
t.Log("Node 1 completed reconstruction")
receivedCount++
case col2 = <-node2Complete:
t.Log("Node 2 completed reconstruction")
receivedCount++
case <-timeout:
t.Fatalf("Timeout: Only %d/2 nodes completed", receivedCount)
}
}
// Verify both columns have all cells
assert.Equal(t, numCells, len(col1.Column), "Node 1 should have all cells")
assert.Equal(t, numCells, len(col2.Column), "Node 2 should have all cells")
assert.DeepSSZEqual(t, cells, col1.Column, "Node 1 cell mismatch")
assert.DeepSSZEqual(t, cells, col2.Column, "Node 2 cell mismatch")
})
}

View File

@@ -0,0 +1,18 @@
package partialdatacolumnbroadcaster
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
partialMessageUsefulCellsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_useful_cells_total",
Help: "Number of useful cells received via a partial message",
}, []string{"column_index"})
partialMessageCellsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_cells_received_total",
Help: "Number of total cells received via a partial message",
}, []string{"column_index"})
)

View File

@@ -0,0 +1,544 @@
package partialdatacolumnbroadcaster
import (
"bytes"
"log/slog"
"regexp"
"strconv"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p-pubsub/partialmessages"
"github.com/libp2p/go-libp2p-pubsub/partialmessages/bitmap"
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// TODOs:
// different eager push strategies:
// - no eager push
// - full column eager push
// - With debouncing - some factor of RTT
// - eager push missing cells
const TTLInSlots = 3
const maxConcurrentValidators = 128
var dataColumnTopicRegex = regexp.MustCompile(`data_column_sidecar_(\d+)`)
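// extractColumnIndexFromTopic parses the subnet index out of a gossip topic containing
// a data_column_sidecar_<n> segment, returning an error when the topic does not match that pattern.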
func extractColumnIndexFromTopic(topic string) (uint64, error) {
matches := dataColumnTopicRegex.FindStringSubmatch(topic)
if len(matches) < 2 {
return 0, errors.New("could not extract column index from topic")
}
return strconv.ParseUint(matches[1], 10, 64)
}
// HeaderValidator validates a PartialDataColumnHeader.
// Returns (reject, err) where:
// - reject=true, err!=nil: REJECT - peer should be penalized
// - reject=false, err!=nil: IGNORE - don't penalize, just ignore
// - reject=false, err=nil: valid header
type HeaderValidator func(header *ethpb.PartialDataColumnHeader) (reject bool, err error)
type ColumnValidator func(cells []blocks.CellProofBundle) error
type PartialColumnBroadcaster struct {
logger *logrus.Logger
ps *pubsub.PubSub
stop chan struct{}
// map topic -> headerValidators
headerValidators map[string]HeaderValidator
// map topic -> Validator
validators map[string]ColumnValidator
// map topic -> handler
handlers map[string]SubHandler
// map topic -> *pubsub.Topic
topics map[string]*pubsub.Topic
concurrentValidatorSemaphore chan struct{}
// map topic -> map[groupID]PartialColumn
partialMsgStore map[string]map[string]*blocks.PartialDataColumn
groupTTL map[string]int8
// validHeaderCache caches validated headers by group ID (works across topics)
validHeaderCache map[string]*ethpb.PartialDataColumnHeader
incomingReq chan request
}
type requestKind uint8
const (
requestKindPublish requestKind = iota
requestKindSubscribe
requestKindUnsubscribe
requestKindHandleIncomingRPC
requestKindCellsValidated
)
type request struct {
kind requestKind
response chan error
sub subscribe
unsub unsubscribe
publish publish
incomingRPC rpcWithFrom
cellsValidated *cellsValidated
}
type publish struct {
topic string
c blocks.PartialDataColumn
}
type subscribe struct {
t *pubsub.Topic
headerValidator HeaderValidator
validator ColumnValidator
handler SubHandler
}
type unsubscribe struct {
topic string
}
type rpcWithFrom struct {
*pubsub_pb.PartialMessagesExtension
from peer.ID
}
type cellsValidated struct {
validationTook time.Duration
topic string
group []byte
cellIndices []uint64
cells []blocks.CellProofBundle
}
func NewBroadcaster(logger *logrus.Logger) *PartialColumnBroadcaster {
return &PartialColumnBroadcaster{
validators: make(map[string]ColumnValidator),
headerValidators: make(map[string]HeaderValidator),
handlers: make(map[string]SubHandler),
topics: make(map[string]*pubsub.Topic),
partialMsgStore: make(map[string]map[string]*blocks.PartialDataColumn),
groupTTL: make(map[string]int8),
validHeaderCache: make(map[string]*ethpb.PartialDataColumnHeader),
// GossipSub sends messages to this channel. The buffer should be large enough
// to avoid dropping messages, since the gossipsub event loop must not block on this.
incomingReq: make(chan request, 128*16),
logger: logger,
concurrentValidatorSemaphore: make(chan struct{}, maxConcurrentValidators),
}
}
// AppendPubSubOpts adds the necessary pubsub options to enable partial messages.
func (p *PartialColumnBroadcaster) AppendPubSubOpts(opts []pubsub.Option) []pubsub.Option {
slogger := slog.New(logrusadapter.Handler{Logger: p.logger})
opts = append(opts,
pubsub.WithPartialMessagesExtension(&partialmessages.PartialMessagesExtension{
Logger: slogger,
MergePartsMetadata: func(topic string, left, right partialmessages.PartsMetadata) partialmessages.PartsMetadata {
if len(left) == 0 {
return right
}
merged, err := bitfield.Bitlist(left).Or(bitfield.Bitlist(right))
if err != nil {
p.logger.Warn("Failed to merge bitfields", "err", err, "left", left, "right", right)
return left
}
return partialmessages.PartsMetadata(merged)
},
ValidateRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
// TODO. Add some basic and fast sanity checks
return nil
},
OnIncomingRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
select {
case p.incomingReq <- request{
kind: requestKindHandleIncomingRPC,
incomingRPC: rpcWithFrom{rpc, from},
}:
default:
p.logger.Warn("Dropping incoming partial RPC", "rpc", rpc)
}
return nil
},
}),
func(ps *pubsub.PubSub) error {
p.ps = ps
return nil
},
)
return opts
}
// Start starts the event loop of the PartialColumnBroadcaster. Should be called
// within a goroutine (go p.Start())
func (p *PartialColumnBroadcaster) Start() {
if p.stop != nil {
return
}
p.stop = make(chan struct{})
p.loop()
}
func (p *PartialColumnBroadcaster) loop() {
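// Once per slot, age out stored partial columns, cached headers, and group TTL entries whose TTL has expired.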
cleanup := time.NewTicker(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot))
defer cleanup.Stop()
for {
select {
case <-p.stop:
return
case <-cleanup.C:
for groupID, ttl := range p.groupTTL {
if ttl > 0 {
p.groupTTL[groupID] = ttl - 1
continue
}
delete(p.groupTTL, groupID)
delete(p.validHeaderCache, groupID)
for topic, msgStore := range p.partialMsgStore {
delete(msgStore, groupID)
if len(msgStore) == 0 {
delete(p.partialMsgStore, topic)
}
}
}
case req := <-p.incomingReq:
switch req.kind {
case requestKindPublish:
req.response <- p.publish(req.publish.topic, req.publish.c)
case requestKindSubscribe:
req.response <- p.subscribe(req.sub.t, req.sub.headerValidator, req.sub.validator, req.sub.handler)
case requestKindUnsubscribe:
req.response <- p.unsubscribe(req.unsub.topic)
case requestKindHandleIncomingRPC:
err := p.handleIncomingRPC(req.incomingRPC)
if err != nil {
p.logger.Error("Failed to handle incoming partial RPC", "err", err)
}
case requestKindCellsValidated:
err := p.handleCellsValidated(req.cellsValidated)
if err != nil {
p.logger.Error("Failed to handle cells validated", "err", err)
}
default:
p.logger.Error("Unknown request kind", "kind", req.kind)
}
}
}
}
func (p *PartialColumnBroadcaster) getDataColumn(topic string, group []byte) *blocks.PartialDataColumn {
topicStore, ok := p.partialMsgStore[topic]
if !ok {
return nil
}
msg, ok := topicStore[string(group)]
if !ok {
return nil
}
return msg
}
func (p *PartialColumnBroadcaster) handleIncomingRPC(rpcWithFrom rpcWithFrom) error {
if p.ps == nil {
return errors.New("pubsub not initialized")
}
hasMessage := len(rpcWithFrom.PartialMessage) > 0
var message ethpb.PartialDataColumnSidecar
if hasMessage {
err := message.UnmarshalSSZ(rpcWithFrom.PartialMessage)
if err != nil {
return errors.Wrap(err, "failed to unmarshal partial message data")
}
}
topicID := rpcWithFrom.GetTopicID()
groupID := rpcWithFrom.GroupID
ourDataColumn := p.getDataColumn(topicID, groupID)
var shouldRepublish bool
if ourDataColumn == nil && hasMessage {
var header *ethpb.PartialDataColumnHeader
// Check cache first for this group
if cachedHeader, ok := p.validHeaderCache[string(groupID)]; ok {
header = cachedHeader
} else {
// We haven't seen this group before. Check if we have a valid header.
if len(message.Header) == 0 {
p.logger.Debug("No partial column found and no header in message, ignoring")
return nil
}
header = message.Header[0]
headerValidator, ok := p.headerValidators[topicID]
if !ok || headerValidator == nil {
p.logger.Debug("No header validator registered for topic")
return nil
}
reject, err := headerValidator(header)
if err != nil {
p.logger.Debug("Header validation failed", "err", err, "reject", reject)
if reject {
// REJECT case: penalize the peer
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
}
// Both REJECT and IGNORE: don't process further
return nil
}
// Cache the valid header
p.validHeaderCache[string(groupID)] = header
// TODO: We now have the information we need to call GetBlobsV3, we should do that to see what we have locally.
}
columnIndex, err := extractColumnIndexFromTopic(topicID)
if err != nil {
return err
}
newColumn, err := blocks.NewPartialDataColumn(
header.SignedBlockHeader,
columnIndex,
header.KzgCommitments,
header.KzgCommitmentsInclusionProof,
)
if err != nil {
p.logger.WithError(err).WithFields(logrus.Fields{
"topic": topicID,
"columnIndex": columnIndex,
"numCommitments": len(header.KzgCommitments),
}).Error("Failed to create partial data column from header")
return err
}
// Save to store
topicStore, ok := p.partialMsgStore[topicID]
if !ok {
topicStore = make(map[string]*blocks.PartialDataColumn)
p.partialMsgStore[topicID] = topicStore
}
topicStore[string(newColumn.GroupID())] = &newColumn
p.groupTTL[string(newColumn.GroupID())] = TTLInSlots
ourDataColumn = &newColumn
shouldRepublish = true
}
if ourDataColumn == nil {
// We don't have a partial column for this. Can happen if we got cells
// without a header.
return nil
}
logger := p.logger.WithFields(logrus.Fields{
"from": rpcWithFrom.from,
"topic": topicID,
"group": groupID,
})
validator, validatorOK := p.validators[topicID]
if len(rpcWithFrom.PartialMessage) > 0 && validatorOK {
// TODO: is there any penalty we want to consider for giving us data we didn't request?
// Note that we need to be careful around race conditions and eager data.
// Also note that protobufs by design allow extra data that we don't parse.
// Marco's thoughts. No, we don't need to do anything else here.
cellIndices, cellsToVerify, err := ourDataColumn.CellsToVerifyFromPartialMessage(&message)
if err != nil {
return err
}
// Track cells received via partial message
if len(cellIndices) > 0 {
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
partialMessageCellsReceivedTotal.WithLabelValues(columnIndexStr).Add(float64(len(cellIndices)))
}
if len(cellsToVerify) > 0 {
p.concurrentValidatorSemaphore <- struct{}{}
go func() {
defer func() {
<-p.concurrentValidatorSemaphore
}()
start := time.Now()
err := validator(cellsToVerify)
if err != nil {
logger.Error("failed to validate cells", "err", err)
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
return
}
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackUsefulMessage)
p.incomingReq <- request{
kind: requestKindCellsValidated,
cellsValidated: &cellsValidated{
validationTook: time.Since(start),
topic: topicID,
group: groupID,
cells: cellsToVerify,
cellIndices: cellIndices,
},
}
}()
}
}
peerHas := bitmap.Bitmap(rpcWithFrom.PartsMetadata)
iHave := bitmap.Bitmap(ourDataColumn.PartsMetadata())
if !shouldRepublish && len(peerHas) > 0 && !bytes.Equal(peerHas, iHave) {
// Either we have something they don't or vice versa
shouldRepublish = true
logger.Debug("republishing due to parts metadata difference")
}
if shouldRepublish {
err := p.ps.PublishPartialMessage(topicID, ourDataColumn, partialmessages.PublishOptions{})
if err != nil {
return err
}
}
return nil
}
func (p *PartialColumnBroadcaster) handleCellsValidated(cells *cellsValidated) error {
ourDataColumn := p.getDataColumn(cells.topic, cells.group)
if ourDataColumn == nil {
return errors.New("data column not found for verified cells")
}
extended := ourDataColumn.ExtendFromVerfifiedCells(cells.cellIndices, cells.cells)
p.logger.Debug("Extended partial message", "duration", cells.validationTook, "extended", extended)
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
if extended {
// Track useful cells (cells that extended our data)
partialMessageUsefulCellsTotal.WithLabelValues(columnIndexStr).Add(float64(len(cells.cells)))
// TODO: we could use the heuristic here that if this data was
// useful to us, it's likely useful to our peers and we should
// republish eagerly
if col, ok := ourDataColumn.Complete(p.logger); ok {
p.logger.Info("Completed partial column", "topic", cells.topic, "group", cells.group)
handler, handlerOK := p.handlers[cells.topic]
if handlerOK {
go handler(cells.topic, col)
}
} else {
p.logger.Info("Extended partial column", "topic", cells.topic, "group", cells.group)
}
err := p.ps.PublishPartialMessage(cells.topic, ourDataColumn, partialmessages.PublishOptions{})
if err != nil {
return err
}
}
return nil
}
func (p *PartialColumnBroadcaster) Stop() {
if p.stop != nil {
close(p.stop)
p.stop = nil
}
}
// Publish publishes the partial column.
func (p *PartialColumnBroadcaster) Publish(topic string, c blocks.PartialDataColumn) error {
if p.ps == nil {
return errors.New("pubsub not initialized")
}
respCh := make(chan error)
p.incomingReq <- request{
kind: requestKindPublish,
response: respCh,
publish: publish{
topic: topic,
c: c,
},
}
return <-respCh
}
func (p *PartialColumnBroadcaster) publish(topic string, c blocks.PartialDataColumn) error {
topicStore, ok := p.partialMsgStore[topic]
if !ok {
topicStore = make(map[string]*blocks.PartialDataColumn)
p.partialMsgStore[topic] = topicStore
}
topicStore[string(c.GroupID())] = &c
p.groupTTL[string(c.GroupID())] = TTLInSlots
return p.ps.PublishPartialMessage(topic, &c, partialmessages.PublishOptions{})
}
type SubHandler func(topic string, col blocks.VerifiedRODataColumn)
func (p *PartialColumnBroadcaster) Subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler) error {
respCh := make(chan error)
p.incomingReq <- request{
kind: requestKindSubscribe,
sub: subscribe{
t: t,
headerValidator: headerValidator,
validator: validator,
handler: handler,
},
response: respCh,
}
return <-respCh
}
func (p *PartialColumnBroadcaster) subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler) error {
topic := t.String()
if _, ok := p.topics[topic]; ok {
return errors.New("already subscribed")
}
p.topics[topic] = t
p.headerValidators[topic] = headerValidator
p.validators[topic] = validator
p.handlers[topic] = handler
return nil
}
func (p *PartialColumnBroadcaster) Unsubscribe(topic string) error {
respCh := make(chan error)
p.incomingReq <- request{
kind: requestKindUnsubscribe,
unsub: unsubscribe{
topic: topic,
},
response: respCh,
}
return <-respCh
}
func (p *PartialColumnBroadcaster) unsubscribe(topic string) error {
t, ok := p.topics[topic]
if !ok {
return errors.New("topic not found")
}
delete(p.topics, topic)
delete(p.partialMsgStore, topic)
delete(p.headerValidators, topic)
delete(p.validators, topic)
delete(p.handlers, topic)
return t.Close()
}

View File
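For orientation, the expected wiring order mirrors the two-node test above: build the broadcaster, thread its options into GossipSub, run its event loop, then subscribe and publish per topic. A hedged, self-contained sketch of that lifecycle for a single topic (a real node keeps the broadcaster and subscription running for its whole lifetime; this function only illustrates the order of calls, with minimal error handling):

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/sirupsen/logrus"
)

// runPartialColumnTopic wires a PartialColumnBroadcaster into a fresh GossipSub
// instance, subscribes one data column topic, and publishes the cells we
// already hold; peers exchange the remaining cells via partial messages.
func runPartialColumnTopic(
	ctx context.Context,
	h host.Host,
	logger *logrus.Logger,
	topicStr string,
	headerValidator partialdatacolumnbroadcaster.HeaderValidator,
	cellValidator partialdatacolumnbroadcaster.ColumnValidator,
	onComplete partialdatacolumnbroadcaster.SubHandler,
	pc blocks.PartialDataColumn,
) error {
	bcast := partialdatacolumnbroadcaster.NewBroadcaster(logger)
	// AppendPubSubOpts registers the partial messages extension and hands the
	// constructed *pubsub.PubSub back to the broadcaster.
	ps, err := pubsub.NewGossipSub(ctx, h, bcast.AppendPubSubOpts(nil)...)
	if err != nil {
		return err
	}
	go bcast.Start()
	defer bcast.Stop()

	// Join with partial messages requested and keep a regular subscription
	// alive; the two-node test notes this is required for the partial RPC exchange.
	topic, err := ps.Join(topicStr, pubsub.RequestPartialMessages())
	if err != nil {
		return err
	}
	sub, err := topic.Subscribe()
	if err != nil {
		return err
	}
	defer sub.Cancel()
	if err := bcast.Subscribe(topic, headerValidator, cellValidator, onComplete); err != nil {
		return err
	}

	// Publish whatever cells we already hold for this column.
	return bcast.Publish(topicStr, pc)
}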

@@ -602,33 +602,6 @@ func (p *Status) All() []peer.ID {
return pids
}
// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState != Connected {
continue
}
if peerData.Enr == nil {
continue
}
var enabled bool
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
if err := peerData.Enr.Load(entry); err != nil {
continue
}
if enabled {
peers = append(peers, pid)
}
}
return peers
}
// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
p.store.Lock()

View File

@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {
resAddress, err := p.Address(id)
require.NoError(t, err)
assert.Equal(t, address, resAddress, "Unexpected address")
assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
resDirection, err := p.Direction(id)
require.NoError(t, err)
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {
resAddress2, err := p.Address(id)
require.NoError(t, err)
assert.Equal(t, address2, resAddress2, "Unexpected address")
assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
resDirection2, err := p.Direction(id)
require.NoError(t, err)
@@ -1341,75 +1341,3 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
p.SetConnectionState(id, state)
return id
}
func TestZkvmEnabledPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 1,
},
},
})
// Create peer 1: Connected, zkVM enabled
pid1 := addPeer(t, p, peers.Connected)
record1 := new(enr.Record)
zkvmEnabled := true
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record1, pid1, nil, network.DirOutbound)
p.SetConnectionState(pid1, peers.Connected)
// Create peer 2: Connected, zkVM disabled
pid2 := addPeer(t, p, peers.Connected)
record2 := new(enr.Record)
zkvmDisabled := false
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
p.Add(record2, pid2, nil, network.DirOutbound)
p.SetConnectionState(pid2, peers.Connected)
// Create peer 3: Connected, zkVM enabled
pid3 := addPeer(t, p, peers.Connected)
record3 := new(enr.Record)
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record3, pid3, nil, network.DirOutbound)
p.SetConnectionState(pid3, peers.Connected)
// Create peer 4: Disconnected, zkVM enabled (should not be included)
pid4 := addPeer(t, p, peers.Disconnected)
record4 := new(enr.Record)
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record4, pid4, nil, network.DirOutbound)
p.SetConnectionState(pid4, peers.Disconnected)
// Create peer 5: Connected, no ENR (should not be included)
pid5 := addPeer(t, p, peers.Connected)
p.Add(nil, pid5, nil, network.DirOutbound)
p.SetConnectionState(pid5, peers.Connected)
// Create peer 6: Connected, no zkVM key in ENR (should not be included)
pid6 := addPeer(t, p, peers.Connected)
record6 := new(enr.Record)
record6.Set(enr.WithEntry("other_key", "other_value"))
p.Add(record6, pid6, nil, network.DirOutbound)
p.SetConnectionState(pid6, peers.Connected)
// Get zkVM enabled peers
zkvmPeers := p.ZkvmEnabledPeers()
// Should return only pid1 and pid3 (connected peers with zkVM enabled)
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
// Verify the returned peers are correct
zkvmPeerMap := make(map[peer.ID]bool)
for _, pid := range zkvmPeers {
zkvmPeerMap[pid] = true
}
assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

View File

@@ -160,6 +160,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}
if s.partialColumnBroadcaster != nil {
psOpts = s.partialColumnBroadcaster.AppendPubSubOpts(psOpts)
}
return psOpts
}

View File

@@ -88,20 +88,20 @@ func (g gossipTracer) ThrottlePeer(p peer.ID) {
// RecvRPC .
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCPubRecvSize, pubsubRPCRecv, rpc)
}
// SendRPC .
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCPubSentSize, pubsubRPCSent, rpc)
}
// DropRPC .
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCPubDropSize, pubsubRPCDrop, rpc)
}
func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, pubSizeCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
subCtr.Add(float64(len(rpc.Subscriptions)))
if rpc.Control != nil {
ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
@@ -110,12 +110,17 @@ func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pu
ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
ctrlCtr.WithLabelValues("idontwant").Add(float64(len(rpc.Control.Idontwant)))
}
// For incoming messages from pubsub, we do not record metrics for them as these values
// could be junk.
if act == recv {
return
}
for _, msg := range rpc.Publish {
// For incoming messages from pubsub, we do not record metrics for them as these values
// could be junk.
if act == recv {
continue
}
pubCtr.WithLabelValues(*msg.Topic).Inc()
pubCtr.WithLabelValues(msg.GetTopic()).Inc()
pubSizeCtr.WithLabelValues(msg.GetTopic(), "false").Add(float64(msg.Size()))
}
if rpc.Partial != nil {
pubCtr.WithLabelValues(rpc.Partial.GetTopicID()).Inc()
pubSizeCtr.WithLabelValues(rpc.Partial.GetTopicID(), "true").Add(float64(rpc.Partial.Size()))
}
}

View File

@@ -67,9 +67,6 @@ const (
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
ExecutionProofsByRootName = "/execution_proofs_by_root"
)
const (
@@ -109,9 +106,6 @@ const (
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -176,9 +170,6 @@ var (
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
// ExecutionProofsByRoot v1 Message
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}
// Maps all registered protocol prefixes.
@@ -202,7 +193,6 @@ var (
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
ExecutionProofsByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
@@ -77,6 +78,7 @@ type Service struct {
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
partialColumnBroadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
@@ -147,6 +149,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
custodyInfoSet: make(chan struct{}),
}
if cfg.PartialDataColumns {
s.partialColumnBroadcaster = partialdatacolumnbroadcaster.NewBroadcaster(log.Logger)
}
ipAddr := prysmnetwork.IPAddr()
opts, err := s.buildOptions(ipAddr, s.privKey)
@@ -305,6 +311,10 @@ func (s *Service) Start() {
logExternalDNSAddr(s.host.ID(), p2pHostDNS, p2pTCPPort)
}
go s.forkWatcher()
if s.partialColumnBroadcaster != nil {
go s.partialColumnBroadcaster.Start()
}
}
// Stop the p2p service and terminate all peer connections.
@@ -314,6 +324,10 @@ func (s *Service) Stop() error {
if s.dv5Listener != nil {
s.dv5Listener.Close()
}
if s.partialColumnBroadcaster != nil {
s.partialColumnBroadcaster.Stop()
}
return nil
}
@@ -350,6 +364,10 @@ func (s *Service) PubSub() *pubsub.PubSub {
return s.pubsub
}
func (s *Service) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return s.partialColumnBroadcaster
}
// Host returns the currently running libp2p
// host of the service.
func (s *Service) Host() host.Host {

View File

@@ -36,7 +36,6 @@ var (
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
)
// The value used with the subnet, in order

View File

@@ -21,6 +21,7 @@ go_library(
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -108,6 +109,10 @@ func (*FakeP2P) PubSub() *pubsub.PubSub {
return nil
}
func (*FakeP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return nil
}
// MetadataSeq -- fake.
func (*FakeP2P) MetadataSeq() uint64 {
return 0
@@ -169,7 +174,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
}
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
return nil
}

View File

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
m.BroadcastCalled.Store(true)
return nil
}

View File

@@ -12,6 +12,7 @@ import (
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
@@ -233,7 +234,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
p.BroadcastCalled.Store(true)
return nil
}
@@ -299,6 +300,10 @@ func (p *TestP2P) PubSub() *pubsub.PubSub {
return p.pubsub
}
func (p *TestP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return nil
}
// Disconnect from a peer.
func (p *TestP2P) Disconnect(pid peer.ID) error {
return p.BHost.Network().ClosePeer(pid)

View File

@@ -46,8 +46,6 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipExecutionProofMessage is the name for the execution proof message type.
GossipExecutionProofMessage = "execution_proof"
// Topic Formats
//
@@ -77,8 +75,6 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (PoC only has one global topic)
)
// topic is a struct representing a single gossipsub topic.
@@ -162,7 +158,6 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(fulu, future, empty, GossipExecutionProofMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
@@ -958,13 +957,6 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")

View File

@@ -130,10 +130,6 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
defer span.End()
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
versionHeader := r.Header.Get(api.VersionHeader)
if versionHeader == "" {
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
@@ -242,14 +238,22 @@ func (s *Server) handleAttestationsElectra(
},
})
// Broadcast first using CommitteeId directly (fast path)
// This matches gRPC behavior and avoids blocking on state fetching
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
att := singleAtt.ToAttestationElectra(committee)
wantedEpoch := slots.ToEpoch(att.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get head validator indices")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
Index: i,
@@ -260,35 +264,17 @@ func (s *Server) handleAttestationsElectra(
}
continue
}
}
// Save to pool after broadcast (slow path - requires state fetching)
// Run in goroutine to avoid blocking the HTTP response
go func() {
for _, singleAtt := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
if err != nil {
log.WithError(err).Error("Could not get target state for attestation")
continue
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
log.WithError(err).Error("Could not get committee for attestation")
continue
}
att := singleAtt.ToAttestationElectra(committee)
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}
}()
}
if len(failedBroadcasts) > 0 {
log.WithFields(logrus.Fields{
@@ -484,10 +470,6 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
defer span.End()
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
var req structs.SubmitSyncCommitteeSignaturesRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {

View File

@@ -26,7 +26,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -623,8 +622,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
OperationNotifier: &blockchainmock.MockOperationNotifier{},
AttestationStateFetcher: chainService,
}
@@ -657,7 +654,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
@@ -677,7 +673,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("phase0 att post electra", func(t *testing.T) {
@@ -798,7 +793,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
@@ -818,7 +812,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("no body", func(t *testing.T) {
@@ -868,27 +861,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
})
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}
var body bytes.Buffer
_, err := body.WriteString(singleAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttestationsV2(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}
func TestListVoluntaryExits(t *testing.T) {
@@ -1085,19 +1057,14 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
t.Run("single", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: chainService,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
},
}
@@ -1122,19 +1089,14 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: chainService,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
},
}
@@ -1158,18 +1120,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("invalid", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: chainService,
HeadFetcher: &blockchainmock.ChainService{
State: st,
},
},
}
@@ -1192,13 +1149,7 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
})
t.Run("empty", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
s := &Server{}
var body bytes.Buffer
_, err := body.WriteString("[]")
@@ -1215,13 +1166,7 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("no body", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
s := &Server{}
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
@@ -1234,26 +1179,6 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}
var body bytes.Buffer
_, err := body.WriteString(singleSyncCommitteeMsg)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitSyncCommitteeSignatures(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}
func TestListBLSToExecutionChanges(t *testing.T) {

View File

@@ -40,7 +40,6 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {

View File

@@ -169,11 +169,6 @@ func TestGetSpec(t *testing.T) {
config.BlobsidecarSubnetCountElectra = 102
config.SyncMessageDueBPS = 103
// EIP-8025
config.MaxProofDataBytes = 200
config.MinEpochsForExecutionProofRequests = 201
config.MinProofsRequired = 202
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
config.DomainBeaconProposer = dbp
@@ -210,7 +205,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 178, len(data))
assert.Equal(t, 175, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -582,12 +577,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "103", v)
case "MAX_PROOF_DATA_BYTES":
assert.Equal(t, "200", v)
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
assert.Equal(t, "201", v)
case "MIN_PROOFS_REQUIRED":
assert.Equal(t, "202", v)
case "BLOB_SCHEDULE":
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)

View File

@@ -42,7 +42,6 @@ go_library(
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/electra:go_default_library",

View File

@@ -52,27 +52,24 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
defer span.End()
if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
go func() {
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}
}()
}()
}
return resp, nil
}
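
One detail worth noting in the hunk above: when the pool save is pushed onto a goroutine, the attestation is copied first (attCopy := att.Copy()) so the handler never shares a mutable value with the background writer. A small sketch of that pattern; the interfaces below are placeholders, not the Prysm types.

package sketch

import "log"

// Attestation stands in for the consensus type; Copy is assumed to return a
// deep copy, as att.Copy() does in the hunk above.
type Attestation interface {
	Copy() Attestation
}

// Pool stands in for the unaggregated attestation pool.
type Pool interface {
	SaveUnaggregatedAttestation(att Attestation) error
}

// saveAsync hands a private copy to the background save so the caller can
// keep using its own value without a data race.
func saveAsync(p Pool, att Attestation) {
	attCopy := att.Copy()
	go func() {
		if err := p.SaveUnaggregatedAttestation(attCopy); err != nil {
			log.Printf("could not save unaggregated attestation: %v", err)
		}
	}()
}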
@@ -85,10 +82,6 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()
if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
if err != nil {
return nil, err
@@ -105,17 +98,18 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
singleAttCopy := singleAtt.Copy()
att := singleAttCopy.ToAttestationElectra(committee)
go func() {
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}
}()
}()
}
return resp, nil
}

View File

@@ -38,7 +38,6 @@ func TestProposeAttestation(t *testing.T) {
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
TimeFetcher: chainService,
AttestationStateFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
head := util.NewBeaconBlock()
head.Block.Slot = 999
@@ -142,7 +141,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.HydrateAttestation(&ethpb.Attestation{})
@@ -151,37 +149,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
assert.ErrorContains(t, wanted, err)
}
func TestProposeAttestation_Syncing(t *testing.T) {
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}
req := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attesterServer.ProposeAttestation(t.Context(), req)
assert.ErrorContains(t, "Syncing to latest head", err)
s, ok := status.FromError(err)
require.Equal(t, true, ok)
assert.Equal(t, codes.Unavailable, s.Code())
}
func TestProposeAttestationElectra_Syncing(t *testing.T) {
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}
req := &ethpb.SingleAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
}
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
assert.ErrorContains(t, "Syncing to latest head", err)
s, ok := status.FromError(err)
require.Equal(t, true, ok)
assert.Equal(t, codes.Unavailable, s.Code())
}
func TestGetAttestationData_OK(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1

View File

@@ -7,6 +7,7 @@ import (
"sync"
"time"
"github.com/OffchainLabs/go-bitfield"
builderapi "github.com/OffchainLabs/prysm/v7/api/client/builder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
@@ -19,7 +20,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -309,6 +309,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}
rob, err := blocks.NewROBlockWithRoot(block, root)
var partialColumns []blocks.PartialDataColumn
if block.IsBlinded() {
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
if errors.Is(err, builderapi.ErrBadGateway) {
@@ -316,92 +317,47 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
} else if block.Version() >= version.Deneb {
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
blobSidecars, dataColumnSidecars, partialColumns, err = vs.handleUnblindedBlock(rob, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
}
var wg errgroup.Group
blockBroadcastDone := make(chan bool)
var wg sync.WaitGroup
errChan := make(chan error, 1)
wg.Go(func() error {
if err := vs.broadcastReceiveBlock(ctx, blockBroadcastDone, block, root); err != nil {
return fmt.Errorf("broadcast receive block: %w", err)
wg.Add(1)
go func() {
if err := vs.broadcastReceiveBlock(ctx, &wg, block, root); err != nil {
errChan <- errors.Wrap(err, "broadcast/receive block failed")
return
}
errChan <- nil
}()
return nil
})
wg.Wait()
wg.Go(func() error {
if err := vs.broadcastAndReceiveSidecars(ctx, blockBroadcastDone, block, root, blobSidecars, dataColumnSidecars); err != nil {
return fmt.Errorf("broadcast and receive sidecars: %w", err)
}
return nil
})
if err := wg.Wait(); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block/sidecars: %v", err)
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars, partialColumns); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
}
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
}
// Generate and broadcast execution proofs.
go vs.generateAndBroadcastExecutionProofs(ctx, rob)
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
// TODO: This is a duplicate from the same function in the sync package.
func (vs *Server) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
const delay = 2 * time.Second
proofTypes := flags.Get().ProofGenerationTypes
if len(proofTypes) == 0 {
return
}
var wg errgroup.Group
for _, proofType := range proofTypes {
wg.Go(func() error {
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
if err != nil {
return fmt.Errorf("generate exec proof: %w", err)
}
if err := vs.P2P.Broadcast(ctx, execProof); err != nil {
return fmt.Errorf("broadcast exec proof: %w", err)
}
return nil
})
}
if err := wg.Wait(); err != nil {
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", roBlock.Root()),
"slot": roBlock.Block().Slot(),
"count": len(proofTypes),
}).Debug("Generated and broadcasted execution proofs")
}
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
ctx context.Context,
blockBroadcastDone <-chan bool,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
blobSidecars []*ethpb.BlobSidecar,
dataColumnSidecars []blocks.RODataColumn,
partialColumns []blocks.PartialDataColumn,
) error {
// Wait for block broadcast to complete before broadcasting sidecars.
<-blockBroadcastDone
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, partialColumns); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
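
The rework above swaps the errgroup for a WaitGroup plus a one-slot error channel, so sidecars go out only after the block publish itself has finished while the slower block import keeps running. A sketch of that synchronization shape, assuming nothing beyond what the hunk shows; the three function parameters are placeholders.

package sketch

import (
	"context"
	"sync"
)

// broadcastAll publishes the block on a goroutine, waits only for the publish
// step, then broadcasts the sidecars, and finally collects the block path's
// error, mirroring the ordering in the hunk above.
func broadcastAll(
	ctx context.Context,
	publishBlock func(context.Context) error, // fast: gossip publish only
	importBlock func(context.Context) error, // slower: receive into the chain
	broadcastSidecars func(context.Context) error,
) error {
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Add(1)
	go func() {
		err := publishBlock(ctx)
		wg.Done() // unblock sidecar broadcasting as soon as the block is out
		if err != nil {
			errChan <- err
			return
		}
		errChan <- importBlock(ctx)
	}()

	wg.Wait() // the block has been published; peers can now resolve sidecars
	if err := broadcastSidecars(ctx); err != nil {
		return err
	}
	return <-errChan
}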
@@ -450,45 +406,49 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
func (vs *Server) handleUnblindedBlock(
block blocks.ROBlock,
req *ethpb.GenericSignedBeaconBlock,
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, []blocks.PartialDataColumn, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
if block.Version() >= version.Fulu {
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells and proofs")
return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
}
// Construct data column sidecars from the signed block and cells and proofs.
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
if err != nil {
return nil, nil, errors.Wrap(err, "data column sidcars")
return nil, nil, nil, errors.Wrap(err, "data column sidcars")
}
return nil, roDataColumnSidecars, nil
included := bitfield.NewBitlist(uint64(len(cellsPerBlob)))
included = included.Not() // all bits set to 1
partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
if err != nil {
return nil, nil, nil, errors.Wrap(err, "data column sidcars")
}
return nil, roDataColumnSidecars, partialColumns, nil
}
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "build blob sidecars")
return nil, nil, nil, errors.Wrap(err, "build blob sidecars")
}
return blobSidecars, nil, nil
return blobSidecars, nil, nil, nil
}
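
In the Fulu branch above, the proposer marks every blob as included before handing the cells to peerdas.PartialColumns. A minimal sketch of building that all-set inclusion bitlist, assuming only the go-bitfield calls that appear in the hunk (NewBitlist starts all-zero and Not flips the data bits):

package sketch

import "github.com/OffchainLabs/go-bitfield"

// fullInclusionBitlist mirrors the proposer hunk above: a fresh bitlist of
// blobCount bits with every bit set, meaning "all cells are included".
func fullInclusionBitlist(blobCount uint64) bitfield.Bitlist {
	included := bitfield.NewBitlist(blobCount)
	return included.Not()
}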
// broadcastReceiveBlock broadcasts a block and handles its reception.
// It closes the blockBroadcastDone channel once broadcasting is complete (but before receiving the block).
func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone chan<- bool, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, block, root); err != nil {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {
return errors.Wrap(err, "broadcast block")
}
close(blockBroadcastDone)
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
@@ -501,7 +461,9 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone
return nil
}
func (vs *Server) broadcastBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
defer wg.Done()
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -545,7 +507,7 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
}
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn, partialColumns []blocks.PartialDataColumn) error {
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, sidecar := range roSidecars {
@@ -554,7 +516,7 @@ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars
}
// Broadcast sidecars (non blocking).
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars, partialColumns); err != nil {
return errors.Wrap(err, "broadcast data column sidecars")
}
@@ -698,57 +660,3 @@ func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, er
return nil, nil, errors.Errorf("unknown request type provided: %T", req)
}
}
// generateExecProof returns a dummy execution proof after the specified delay.
// TODO: This is a duplicate from the same function in the sync package.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
// Simulate proof generation work
time.Sleep(delay)
// Create a dummy proof with some deterministic data
block := roBlock.Block()
if block == nil {
return nil, errors.New("nil block")
}
body := block.Body()
if body == nil {
return nil, errors.New("nil block body")
}
executionData, err := body.Execution()
if err != nil {
return nil, fmt.Errorf("execution: %w", err)
}
if executionData == nil {
return nil, errors.New("nil execution data")
}
hash, err := executionData.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("hash tree root: %w", err)
}
proofData := []byte{
0xFF, // Magic byte for dummy proof
byte(proofID),
// Include some payload hash bytes
hash[0],
hash[1],
hash[2],
hash[3],
}
blockRoot := roBlock.Root()
proof := &ethpb.ExecutionProof{
ProofId: proofID,
Slot: block.Slot(),
BlockHash: hash[:],
BlockRoot: blockRoot[:],
ProofData: proofData,
}
return proof, nil
}

View File

@@ -14,7 +14,6 @@ go_library(
"decode_pubsub.go",
"doc.go",
"error.go",
"exec_proofs.go",
"fork_watcher.go",
"fuzz_exports.go", # keep
"log.go",
@@ -32,7 +31,6 @@ go_library(
"rpc_chunked_response.go",
"rpc_data_column_sidecars_by_range.go",
"rpc_data_column_sidecars_by_root.go",
"rpc_execution_proofs_by_root_topic.go",
"rpc_goodbye.go",
"rpc_light_client.go",
"rpc_metadata.go",
@@ -48,7 +46,6 @@ go_library(
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_execution_proofs.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
@@ -60,8 +57,8 @@ go_library(
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_execution_proof.go",
"validate_light_client.go",
"validate_partial_header.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
"validate_sync_contribution_proof.go",
@@ -97,12 +94,12 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
@@ -192,7 +189,6 @@ go_test(
"rpc_blob_sidecars_by_root_test.go",
"rpc_data_column_sidecars_by_range_test.go",
"rpc_data_column_sidecars_by_root_test.go",
"rpc_execution_proofs_by_root_topic_test.go",
"rpc_goodbye_test.go",
"rpc_handler_test.go",
"rpc_light_client_test.go",
@@ -217,7 +213,6 @@ go_test(
"validate_blob_test.go",
"validate_bls_to_execution_change_test.go",
"validate_data_column_test.go",
"validate_execution_proof_test.go",
"validate_light_client_test.go",
"validate_proposer_slashing_test.go",
"validate_sync_committee_message_test.go",
@@ -251,7 +246,6 @@ go_test(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/p2p:go_default_library",

View File

@@ -2,6 +2,7 @@ package sync
import (
"context"
"iter"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -22,9 +23,16 @@ type signatureVerifier struct {
resChan chan error
}
type errorWithSegment struct {
err error
// segment is only available if the batched verification failed
segment peerdas.CellProofBundleSegment
}
type kzgVerifier struct {
dataColumns []blocks.RODataColumn
resChan chan error
sizeHint int
cellProofs iter.Seq[blocks.CellProofBundle]
resChan chan errorWithSegment
}
// A routine that runs in the background to perform batch
@@ -59,8 +67,9 @@ func (s *Service) verifierRoutine() {
// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
kzgBatch := make([]*kzgVerifier, 0, 1)
for {
kzgBatch := make([]*kzgVerifier, 0, 1)
kzgBatch = kzgBatch[:0]
select {
case <-s.ctx.Done():
return
@@ -156,46 +165,74 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
}
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
if len(dataColumns) == 0 {
return pubsub.ValidationReject, errors.New("no data columns provided")
}
// If there are no cells in this column, we can return early since there's nothing to validate.
allEmpty := true
for _, column := range dataColumns {
if len(column.Column) != 0 {
allEmpty = false
break
}
}
if allEmpty {
return pubsub.ValidationAccept, nil
}
sizeHint := len(dataColumns[0].Column) * len(dataColumns)
err := s.validateKZGProofs(ctx, sizeHint, blocks.RODataColumnsToCellProofBundles(dataColumns))
if ctx.Err() != nil {
return pubsub.ValidationIgnore, ctx.Err()
} else if err != nil {
return pubsub.ValidationReject, err
}
return pubsub.ValidationAccept, nil
}
func (s *Service) validateKZGProofs(ctx context.Context, sizeHint int, cellProofs iter.Seq[blocks.CellProofBundle]) error {
_, span := trace.StartSpan(ctx, "sync.validateKZGProofs")
defer span.End()
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
resChan := make(chan error, 1)
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
resChan := make(chan errorWithSegment, 1)
verificationSet := &kzgVerifier{sizeHint: sizeHint, cellProofs: cellProofs, resChan: resChan}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
select {
case s.kzgChan <- verificationSet:
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err()
return ctx.Err()
}
select {
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
case err := <-resChan:
if err != nil {
log.WithError(err).Trace("Could not perform batch verification")
return ctx.Err() // parent context canceled, give up
case errWithSegment := <-resChan:
if errWithSegment.err != nil {
err := errWithSegment.err
log.WithError(err).Trace("Could not perform batch verification of cells")
tracing.AnnotateError(span, err)
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
// We failed batch verification. Try again in this goroutine without batching
return validateUnbatchedKZGProofs(ctx, errWithSegment.segment)
}
}
return pubsub.ValidationAccept, nil
return nil
}
func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
func validateUnbatchedKZGProofs(ctx context.Context, segment peerdas.CellProofBundleSegment) error {
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
defer span.End()
start := time.Now()
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
if err := segment.Verify(); err != nil {
err = errors.Wrap(err, "could not verify")
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
return err
}
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
return pubsub.ValidationAccept, nil
return nil
}
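
The validation path above is batch-first with a per-request fallback: the cheap batched KZG check runs over every pending request, and only if that whole batch fails does the requester re-verify its own segment, so one peer's bad column cannot reject everyone else's. A sketch of that shape, using a placeholder segment interface rather than peerdas.CellProofBundleSegment:

package sketch

import "errors"

// segment is a placeholder for the per-request slice of cell proofs that can
// be verified on its own, like CellProofBundleSegment.Verify in the diff.
type segment interface {
	Verify() error
}

// verifyWithFallback takes the shared batch result and, if the batch was
// rejected, re-checks only this request's segment before giving up.
func verifyWithFallback(batchErr error, own segment) error {
	if batchErr == nil {
		return nil // the batch containing our proofs verified
	}
	if err := own.Verify(); err != nil {
		// Our own proofs really are invalid; keep both errors for context.
		return errors.Join(batchErr, err)
	}
	// The batch failed because of someone else's proofs; ours are fine.
	return nil
}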
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
@@ -203,14 +240,16 @@ func verifyKzgBatch(kzgBatch []*kzgVerifier) {
return
}
allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
cellProofIters := make([]iter.Seq[blocks.CellProofBundle], 0, len(kzgBatch))
var sizeHint int
for _, kzgVerifier := range kzgBatch {
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
sizeHint += kzgVerifier.sizeHint
cellProofIters = append(cellProofIters, kzgVerifier.cellProofs)
}
var verificationErr error
start := time.Now()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
segments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(sizeHint, cellProofIters)
if err != nil {
verificationErr = errors.Wrap(err, "batch KZG verification failed")
} else {
@@ -218,7 +257,14 @@ func verifyKzgBatch(kzgBatch []*kzgVerifier) {
}
// Send the same result to all verifiers in the batch
for _, verifier := range kzgBatch {
verifier.resChan <- verificationErr
for i, verifier := range kzgBatch {
var segment peerdas.CellProofBundleSegment
if verificationErr != nil {
segment = segments[i]
}
verifier.resChan <- errorWithSegment{
err: verificationErr,
segment: segment,
}
}
}
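
verifyKzgBatch above feeds every pending request's iter.Seq of cell/proof bundles into one verification call, carrying a summed sizeHint so the callee can pre-allocate. A small sketch of concatenating such sequences with Go 1.23 iterators; the generic helper below is illustrative, not the peerdas API.

package sketch

import "iter"

// chain yields the elements of each sequence in order, stopping early if the
// consumer does; this is the shape of merging per-request cell/proof streams
// into a single batched verification pass.
func chain[T any](seqs []iter.Seq[T]) iter.Seq[T] {
	return func(yield func(T) bool) {
		for _, seq := range seqs {
			for v := range seq {
				if !yield(v) {
					return
				}
			}
		}
	}
}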

View File

@@ -3,7 +3,6 @@ package sync
import (
"bytes"
"context"
"fmt"
"maps"
"slices"
"sync"
@@ -244,10 +243,8 @@ func requestDirectSidecarsFromPeers(
}
// Compute missing indices by root, excluding those already in storage.
var lastRoot [fieldparams.RootLength]byte
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
for root := range incompleteRoots {
lastRoot = root
storedIndices := storedIndicesByRoot[root]
missingIndices := make(map[uint64]bool, len(requestedIndices))
@@ -262,7 +259,6 @@ func requestDirectSidecarsFromPeers(
}
}
initialMissingRootCount := len(missingIndicesByRoot)
initialMissingCount := computeTotalCount(missingIndicesByRoot)
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
@@ -305,19 +301,11 @@ func requestDirectSidecarsFromPeers(
}
}
log := log.WithFields(logrus.Fields{
"duration": time.Since(start),
"initialMissingRootCount": initialMissingRootCount,
"initialMissingCount": initialMissingCount,
"finalMissingRootCount": len(missingIndicesByRoot),
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
})
if initialMissingRootCount == 1 {
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
}
log.Debug("Requested direct data column sidecars from peers")
log.WithFields(logrus.Fields{
"duration": time.Since(start),
"initialMissingCount": initialMissingCount,
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
}).Debug("Requested direct data column sidecars from peers")
return verifiedColumnsByRoot, nil
}

View File

@@ -1,65 +0,0 @@
package sync
import (
"fmt"
"time"
"errors"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// generateExecProof returns a dummy execution proof after the specified delay.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
// Simulate proof generation work
time.Sleep(delay)
// Create a dummy proof with some deterministic data
block := roBlock.Block()
if block == nil {
return nil, errors.New("nil block")
}
body := block.Body()
if body == nil {
return nil, errors.New("nil block body")
}
executionData, err := body.Execution()
if err != nil {
return nil, fmt.Errorf("execution: %w", err)
}
if executionData == nil {
return nil, errors.New("nil execution data")
}
hash, err := executionData.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("hash tree root: %w", err)
}
proofData := []byte{
0xFF, // Magic byte for dummy proof
byte(proofID),
// Include some payload hash bytes
hash[0],
hash[1],
hash[2],
hash[3],
}
blockRoot := roBlock.Root()
proof := &ethpb.ExecutionProof{
ProofId: proofID,
Slot: block.Slot(),
BlockHash: hash[:],
BlockRoot: blockRoot[:],
ProofData: proofData,
}
return proof, nil
}

View File

@@ -88,12 +88,12 @@ func TestVerifierRoutine(t *testing.T) {
go service.kzgVerifierRoutine()
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
resChan := make(chan errorWithSegment, 1)
service.kzgChan <- &kzgVerifier{sizeHint: 1, cellProofs: blocks.RODataColumnsToCellProofBundles(dataColumns), resChan: resChan}
select {
case err := <-resChan:
require.NoError(t, err)
case errWithSegment := <-resChan:
require.NoError(t, errWithSegment.err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for verification result")
}
@@ -109,19 +109,19 @@ func TestVerifierRoutine(t *testing.T) {
go service.kzgVerifierRoutine()
const numRequests = 5
resChans := make([]chan error, numRequests)
resChans := make([]chan errorWithSegment, numRequests)
for i := range numRequests {
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
resChan := make(chan errorWithSegment, 1)
resChans[i] = resChan
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
service.kzgChan <- &kzgVerifier{sizeHint: 1, cellProofs: blocks.RODataColumnsToCellProofBundles(dataColumns), resChan: resChan}
}
for i := range numRequests {
select {
case err := <-resChans[i]:
require.NoError(t, err)
case errWithSegment := <-resChans[i]:
require.NoError(t, errWithSegment.err)
case <-time.After(time.Second):
t.Fatalf("timeout waiting for verification result %d", i)
}
@@ -158,14 +158,14 @@ func TestVerifyKzgBatch(t *testing.T) {
t.Run("all valid data columns succeed", func(t *testing.T) {
dataColumns := createValidTestDataColumns(t, 3)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}
resChan := make(chan errorWithSegment, 1)
kzgVerifiers := []*kzgVerifier{{sizeHint: 3, cellProofs: blocks.RODataColumnsToCellProofBundles(dataColumns), resChan: resChan}}
verifyKzgBatch(kzgVerifiers)
select {
case err := <-resChan:
require.NoError(t, err)
case errWithSegment := <-resChan:
require.NoError(t, errWithSegment.err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
@@ -176,14 +176,14 @@ func TestVerifyKzgBatch(t *testing.T) {
invalidColumns := createInvalidTestDataColumns(t, 1)
allColumns := append(validColumns, invalidColumns...)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}
resChan := make(chan errorWithSegment, 1)
kzgVerifiers := []*kzgVerifier{{sizeHint: 2, cellProofs: blocks.RODataColumnsToCellProofBundles(allColumns), resChan: resChan}}
verifyKzgBatch(kzgVerifiers)
select {
case err := <-resChan:
assert.NotNil(t, err)
case errWithSegment := <-resChan:
assert.NotNil(t, errWithSegment.err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}

View File

@@ -256,6 +256,16 @@ var (
Help: "Count the number of data column sidecars obtained via the execution layer.",
},
)
usefulFullColumnsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_useful_full_columns_received_total",
Help: "Number of useful full columns (any cell being useful) received",
}, []string{"column_index"})
partialMessageColumnCompletionsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_column_completions_total",
Help: "How often the partial message first completed the column",
}, []string{"column_index"})
)
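
For the two counters added above, the column index is a label, so each increment formats the index into a string. A minimal usage sketch; the helper and its call site are hypothetical, only the CounterVec API is real.

package sketch

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// recordUsefulColumn bumps a per-column counter such as
// usefulFullColumnsReceivedTotal when a received full column still carried a
// cell the node needed.
func recordUsefulColumn(counter *prometheus.CounterVec, columnIndex uint64) {
	counter.WithLabelValues(strconv.FormatUint(columnIndex, 10)).Inc()
}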
func (s *Service) updateMetrics() {

View File

@@ -12,7 +12,6 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -89,13 +88,6 @@ func WithBlsToExecPool(blsToExecPool blstoexec.PoolManager) Option {
}
}
func WithExecProofPool(execProofPool execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.execProofPool = execProofPool
return nil
}
}
func WithChainService(chain blockchainService) Option {
return func(s *Service) error {
s.cfg.chain = chain

View File

@@ -3,9 +3,9 @@ package sync
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"slices"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
@@ -21,23 +21,13 @@ import (
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const pendingAttsLimit = 32768
// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
type aggregatorIndexFilter int
const (
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
ignoreAggregatorIndex aggregatorIndexFilter = iota
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
includeAggregatorIndex
)
var pendingAttsLimit = 32768
// This method processes pending attestations as a "known" block as arrived. With validations,
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
@@ -60,7 +50,16 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()
s.processAttestations(ctx, attestations)
if len(attestations) > 0 {
start := time.Now()
s.processAttestations(ctx, attestations)
duration := time.Since(start)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
"duration": duration,
}).Debug("Verified and saved pending attestations to pool")
}
randGen := rand.NewGenerator()
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
@@ -80,71 +79,26 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}
// processAttestations processes a list of attestations.
// It assumes (for logging purposes only) that all attestations pertain to the same block.
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
if len(attestations) == 0 {
return
}
firstAttestation := attestations[0]
var blockRoot []byte
switch v := firstAttestation.(type) {
case ethpb.Att:
blockRoot = v.GetData().BeaconBlockRoot
case ethpb.SignedAggregateAttAndProof:
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
default:
log.Warnf("Unexpected attestation type %T, skipping processing", v)
return
}
validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
startAggregate := time.Now()
atts := make([]ethpb.Att, 0, len(attestations))
aggregateAttAndProofCount := 0
for _, att := range attestations {
switch v := att.(type) {
case ethpb.Att:
atts = append(atts, v)
case ethpb.SignedAggregateAttAndProof:
aggregateAttAndProofCount++
// Avoid processing multiple aggregates only differing by aggregator index.
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
}) {
continue
}
if err := s.processAggregate(ctx, v); err != nil {
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
continue
}
validAggregates = append(validAggregates, v)
s.processAggregate(ctx, v)
default:
log.Warnf("Unexpected attestation type %T, skipping", v)
}
}
durationAggregateAttAndProof := time.Since(startAggregate)
startAtts := time.Now()
for _, bucket := range bucketAttestationsByData(atts) {
s.processAttestationBucket(ctx, bucket)
}
durationAtts := time.Since(startAtts)
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", blockRoot),
"totalCount": len(attestations),
"aggregateAttAndProofCount": aggregateAttAndProofCount,
"uniqueAggregateAttAndProofCount": len(validAggregates),
"attCount": len(atts),
"durationTotal": durationAggregateAttAndProof + durationAtts,
"durationAggregateAttAndProof": durationAggregateAttAndProof,
"durationAtts": durationAtts,
}).Debug("Verified and saved pending attestations to pool")
}
// attestationBucket groups attestations with the same AttestationData for batch processing.
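
bucketAttestationsByData, kept by the hunk above, groups attestations that share identical AttestationData so each bucket can be verified and aggregated together. A rough sketch of that grouping, assuming the generated HashTreeRoot on AttestationData; error handling is trimmed and the helper name below is illustrative.

package sketch

import ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"

// bucketByData keys attestations by the hash tree root of their data, so
// every attestation in a bucket attests to the same slot and checkpoints and
// can be batch-verified together.
func bucketByData(atts []ethpb.Att) map[[32]byte][]ethpb.Att {
	buckets := make(map[[32]byte][]ethpb.Att, len(atts))
	for _, att := range atts {
		root, err := att.GetData().HashTreeRoot()
		if err != nil {
			continue // skip attestations whose data cannot be hashed
		}
		buckets[root] = append(buckets[root], att)
	}
	return buckets
}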
@@ -349,20 +303,21 @@ func (s *Service) processVerifiedAttestation(
})
}
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
res, err := s.validateAggregatedAtt(ctx, aggregate)
if err != nil {
log.WithError(err).Debug("Pending aggregated attestation failed validation")
return errors.Wrap(err, "validate aggregated att")
return
}
if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
return errors.New("Pending aggregated attestation failed validation")
log.Debug("Pending aggregated attestation failed validation")
return
}
att := aggregate.AggregateAttestationAndProof().AggregateVal()
if err := s.saveAttestation(att); err != nil {
return errors.Wrap(err, "save attestation")
log.WithError(err).Debug("Could not save aggregated attestation")
return
}
_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
@@ -370,8 +325,6 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast aggregated attestation")
}
return nil
}
// This defines how pending aggregates are saved in the map. The key is the
@@ -383,7 +336,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
s.savePending(root, agg, func(other any) bool {
a, ok := other.(ethpb.SignedAggregateAttAndProof)
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
return ok && pendingAggregatesAreEqual(agg, a)
})
}
@@ -438,19 +391,13 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
}
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
// The filter parameter controls whether aggregator index is considered in the equality check.
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
if a.Version() != b.Version() {
return false
}
if filter == includeAggregatorIndex {
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false
}
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false
}
aAtt := a.AggregateAttestationAndProof().AggregateVal()
bAtt := b.AggregateAttestationAndProof().AggregateVal()
if aAtt.GetData().Slot != bAtt.GetData().Slot {

View File

@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
// Process block A (which exists and has no pending attestations)
// This should skip processing attestations for A and request blocks B and C
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
require.LogsContain(t, hook, "Requesting blocks by root")
require.LogsContain(t, hook, "Requesting block by root")
}
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
})
t.Run("different version", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProofElectra{Message: &ethpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("different aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("different slot", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("different committee index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
t.Run("different aggregation bits", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -984,30 +984,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1000},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 1,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
})
}

View File

@@ -2,6 +2,7 @@ package sync
import (
"context"
"encoding/hex"
"fmt"
"slices"
"sync"
@@ -43,13 +44,11 @@ func (s *Service) processPendingBlocksQueue() {
if !s.chainIsStarted() {
return
}
locker.Lock()
defer locker.Unlock()
if err := s.processPendingBlocks(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending blocks")
}
locker.Unlock()
})
}
@@ -74,10 +73,8 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
randGen := rand.NewGenerator()
var parentRoots [][32]byte
blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
// Iterate through sorted slots.
for i, slot := range sortedSlots {
for _, slot := range sortedSlots {
// Skip processing if slot is in the future.
if slot > s.cfg.clock.CurrentSlot() {
continue
@@ -94,9 +91,6 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
// Process each block in the queue.
for _, b := range blocksInCache {
start := time.Now()
totalDuration := time.Duration(0)
if err := blocks.BeaconBlockIsNil(b); err != nil {
continue
}
@@ -153,34 +147,19 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
}
cancelFunction()
blkRoots = append(blkRoots, blkRoot)
// Process pending attestations for this block.
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
log.WithError(err).Debug("Failed to process pending attestations for block")
}
// Remove the processed block from the queue.
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
return err
}
duration := time.Since(start)
totalDuration += duration
log.WithFields(logrus.Fields{
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
"slot": slot,
"root": fmt.Sprintf("%#x", blkRoot),
"duration": duration,
"totalDuration": totalDuration,
}).Debug("Processed pending block and cleared it in cache")
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
}
span.End()
}
for _, blkRoot := range blkRoots {
// Process pending attestations for this block.
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
log.WithError(err).Debug("Failed to process pending attestations for block")
}
}
return s.sendBatchRootRequest(ctx, parentRoots, randGen)
}
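Read as a whole, the hunk above moves pending-attestation processing out of the per-block loop and drops the per-block duration logging. Below is a condensed sketch of the resulting flow, assuming the sync package context; `pendingBlocksInCache` and `processPendingBlock` are hypothetical helpers standing in for the retrieval and import code elided by the diff, while the other names come from the hunk.

```go
// processPendingBlocksSketch is a condensed, hypothetical reading of the
// reworked flow: import blocks slot by slot, then handle pending attestations
// in a single pass once the queue has been drained.
func (s *Service) processPendingBlocksSketch(ctx context.Context, sortedSlots []primitives.Slot) error {
	randGen := rand.NewGenerator()
	var parentRoots [][32]byte // populated by the elided missing-parent handling
	blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
	for _, slot := range sortedSlots {
		if slot > s.cfg.clock.CurrentSlot() {
			continue // skip future slots
		}
		for _, b := range s.pendingBlocksInCache(slot) { // hypothetical accessor
			blkRoot, err := s.processPendingBlock(ctx, b) // hypothetical import helper
			if err != nil {
				continue
			}
			blkRoots = append(blkRoots, blkRoot)
			if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
				return err
			}
		}
	}
	// Pending attestations are handled once, after the whole queue is drained.
	for _, blkRoot := range blkRoots {
		if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
			log.WithError(err).Debug("Failed to process pending attestations for block")
		}
	}
	return s.sendBatchRootRequest(ctx, parentRoots, randGen)
}
```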
@@ -259,10 +238,6 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
return errors.Wrap(err, "request and save missing data column sidecars")
}
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return nil
}
@@ -404,19 +379,6 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
req = roots[:maxReqBlock]
}
if logrus.GetLevel() >= logrus.DebugLevel {
rootsStr := make([]string, 0, len(roots))
for _, req := range roots {
rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
}
log.WithFields(logrus.Fields{
"peer": pid,
"count": len(req),
"roots": rootsStr,
}).Debug("Requesting blocks by root")
}
// Send the request to the peer.
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
@@ -476,6 +438,8 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
roots = append(roots[:i], roots[i+1:]...)
continue
}
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
}
return roots
}

View File

@@ -100,10 +100,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
// DataColumnSidecarsByRangeV1
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
// ExecutionProofsByRootV1
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

View File

@@ -17,7 +17,7 @@ import (
func TestNewRateLimiter(t *testing.T) {
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
}
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

View File

@@ -51,7 +51,6 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
}, nil
}

View File

@@ -11,13 +11,11 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -89,77 +87,9 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
return errors.Wrap(err, "request and save missing data columns")
}
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return err
}
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
if len(blks) == 0 {
return nil
}
// TODO: Parallelize requests for multiple blocks.
for _, blk := range blks {
if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
return err
}
}
return nil
}
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
if !features.Get().EnableZkvm {
return nil
}
// Check proof retention period.
blockEpoch := slots.ToEpoch(block.Block().Slot())
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
return nil
}
// Check how many proofs are needed with Execution Proof Pool.
storedIds := s.cfg.execProofPool.Ids(block.Root())
count := uint64(len(storedIds))
if count >= params.BeaconConfig().MinProofsRequired {
return nil
}
// Construct request
blockRoot := block.Root()
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: params.BeaconConfig().MinProofsRequired - count,
AlreadyHave: storedIds,
}
// Call SendExecutionProofByRootRequest
zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
if len(zkvmEnabledPeers) == 0 {
return fmt.Errorf("no zkVM enabled peers available to request execution proofs")
}
// TODO: For simplicity, just pick the first peer for now.
// In the future, we can implement better peer selection logic.
pid := zkvmEnabledPeers[0]
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
if err != nil {
return fmt.Errorf("send execution proofs by root request: %w", err)
}
// Insert ExecProofPool
// TODO: Implement multiple proof insertion in ExecProofPool to avoid multiple locks.
for _, proof := range proofs {
s.cfg.execProofPool.Insert(proof)
}
return nil
}
// requestAndSaveMissingDataColumnSidecars checks whether data column sidecars are missing for the given blocks.
// If so, it requests them and saves them to storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

View File

@@ -182,21 +182,3 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return nil
}
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
// Success response code.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return errors.Wrap(err, "stream write")
}
ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return errors.Wrap(err, "write context to stream")
}
// Execution proof.
if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
return errors.Wrap(err, "encode with max length")
}
return nil
}

View File

@@ -1,219 +0,0 @@
package sync
import (
"context"
"errors"
"fmt"
"io"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
func SendExecutionProofsByRootRequest(
ctx context.Context,
clock blockchain.TemporalOracle,
p2pProvider p2p.P2P,
pid peer.ID,
req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
// Validate request
if req.CountNeeded == 0 {
return nil, errors.New("count_needed must be greater than 0")
}
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"topic": topic,
"block_root": bytesutil.ToBytes32(req.BlockRoot),
"count": req.CountNeeded,
"already": len(req.AlreadyHave),
}).Debug("Sending execution proofs by root request")
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)
// Read execution proofs from stream
proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
for _, id := range req.AlreadyHave {
alreadyHaveSet[id] = struct{}{}
}
for i := uint64(0); i < req.CountNeeded; i++ {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
// Validate proof
if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
return nil, err
}
proofs = append(proofs, proof)
}
return proofs, nil
}
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
stream libp2pcore.Stream,
encoding p2p.EncodingProvider,
isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
// Read status code for each chunk (like data columns, not like blocks)
code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
if err != nil {
return nil, err
}
if code != 0 {
return nil, errors.New(errMsg)
}
// Read context bytes (fork digest)
_, err = readContextFromStream(stream)
if err != nil {
return nil, fmt.Errorf("read context from stream: %w", err)
}
// Decode the proof
proof := &ethpb.ExecutionProof{}
if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
return nil, err
}
return proof, nil
}
// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
proof *ethpb.ExecutionProof,
req *ethpb.ExecutionProofsByRootRequest,
alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
// Check block root matches
proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
reqRoot := bytesutil.ToBytes32(req.BlockRoot)
if proofRoot != reqRoot {
return fmt.Errorf("proof block root %#x does not match requested root %#x",
proofRoot, reqRoot)
}
// Check we didn't already have this proof
if _, ok := alreadyHaveSet[proof.ProofId]; ok {
return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
}
// Check proof ID is valid (within max range)
if !proof.ProofId.IsValid() {
return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
}
return nil
}
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
defer span.End()
_, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
if !ok {
return errors.New("message is not type ExecutionProofsByRootRequest")
}
remotePeer := stream.Conn().RemotePeer()
SetRPCStreamDeadlines(stream)
// Validate request
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
}
// Penalize peers that send invalid requests.
if err := validateExecutionProofsByRootRequest(req); err != nil {
s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return fmt.Errorf("validate execution proofs by root request: %w", err)
}
blockRoot := bytesutil.ToBytes32(req.BlockRoot)
log := log.WithFields(logrus.Fields{
"blockroot": fmt.Sprintf("%#x", blockRoot),
"neededCount": req.CountNeeded,
"alreadyHave": req.AlreadyHave,
"peer": remotePeer.String(),
})
s.rateLimiter.add(stream, 1)
defer closeStream(stream, log)
// Get proofs from execution proof pool
storedProofs := s.cfg.execProofPool.Get(blockRoot)
// Filter out not requested proofs
alreadyHave := make(map[primitives.ExecutionProofId]bool)
for _, id := range req.AlreadyHave {
alreadyHave[id] = true
}
// Send proofs
sentCount := uint64(0)
for _, proof := range storedProofs {
if sentCount >= req.CountNeeded {
break
}
// Skip proofs the requester already has
if alreadyHave[proof.ProofId] {
continue
}
// Write proof to stream
SetStreamWriteDeadline(stream, defaultWriteDuration)
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
log.WithError(err).Debug("Could not send execution proof")
s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
return err
}
sentCount++
}
log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")
return nil
}
func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
if req.CountNeeded == 0 {
return errors.New("count_needed must be greater than 0")
}
return nil
}

View File

@@ -1,727 +0,0 @@
package sync
import (
"io"
"sync"
"testing"
"time"
chainMock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/pkg/errors"
)
func TestExecutionProofsByRootRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.executionProofsByRootRPCHandler(t.Context(), nil, nil)
require.ErrorContains(t, "message is not type ExecutionProofsByRootRequest", err)
})
t.Run("invalid request - count_needed is 0", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
service := &Service{cfg: &config{p2p: localP2P}, rateLimiter: newRateLimiter(localP2P)}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
code, errMsg, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
require.NoError(t, err)
require.Equal(t, responseCodeInvalidRequest, code)
require.Equal(t, "count_needed must be greater than 0", errMsg)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 0, // Invalid: must be > 0
AlreadyHave: []primitives.ExecutionProofId{},
}
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NotNil(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("zkVM disabled - returns empty", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: false, // Disabled
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
execProofPool := execproofs.NewPool()
service := &Service{
cfg: &config{
p2p: localP2P,
execProofPool: execProofPool,
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
// Should receive no proofs (stream should end)
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
require.ErrorIs(t, err, io.EOF)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("no proofs available", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
execProofPool := execproofs.NewPool()
service := &Service{
cfg: &config{
p2p: localP2P,
execProofPool: execProofPool,
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
// Should receive no proofs (stream should end)
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
require.ErrorIs(t, err, io.EOF)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("nominal - returns requested proofs", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with some proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
// Add 3 proofs for the same block
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
proof3 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(3),
ProofData: []byte("proof3"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
execProofPool.Insert(proof3)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 2)
for i := range 2 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
assert.Equal(t, 2, len(proofs))
// Should receive proof1 and proof2 (first 2 in pool)
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("filters already_have proofs", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with some proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
// Add 4 proofs for the same block
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
proof3 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(3),
ProofData: []byte("proof3"),
}
proof4 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(4),
ProofData: []byte("proof4"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
execProofPool.Insert(proof3)
execProofPool.Insert(proof4)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 2)
for i := range 3 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
// Should skip proof1 and proof2 (already_have), and return proof3 and proof4
assert.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(3), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(4), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{1, 2}, // Already have proof1 and proof2
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("partial send - less proofs than requested", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with only 2 proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 5)
for i := range 5 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
// Should only receive 2 proofs (not 5 as requested)
assert.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 5, // Request 5 but only 2 available
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
}
func TestValidateExecutionProofsByRootRequest(t *testing.T) {
t.Run("invalid - count_needed is 0", func(t *testing.T) {
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
CountNeeded: 0,
AlreadyHave: []primitives.ExecutionProofId{},
}
err := validateExecutionProofsByRootRequest(req)
require.ErrorContains(t, "count_needed must be greater than 0", err)
})
t.Run("valid", func(t *testing.T) {
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err := validateExecutionProofsByRootRequest(req)
require.NoError(t, err)
})
}
func TestSendExecutionProofsByRootRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
t.Run("count_needed is 0 - returns error", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 0,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "count_needed must be greater than 0", err)
require.Equal(t, 0, len(proofs))
})
t.Run("success - receives requested proofs", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
// Create proofs to send back
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
// Setup remote to send proofs
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
// Read the request (we don't validate it in this test)
_ = &ethpb.ExecutionProofsByRootRequest{}
// Send proof1
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
// Send proof2
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof2))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.NoError(t, err)
require.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
})
t.Run("partial response - EOF before count_needed", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
// Setup remote to send only 1 proof (but we request 5)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
// Send only proof1
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 5, // Request 5 but only get 1
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.NoError(t, err)
require.Equal(t, 1, len(proofs)) // Only received 1
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
})
t.Run("invalid block root - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
requestedRoot := [32]byte{0x01, 0x02, 0x03}
wrongRoot := [32]byte{0xFF, 0xFF, 0xFF}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
// Create proof with wrong block root
proof1 := &ethpb.ExecutionProof{
BlockRoot: wrongRoot[:], // Wrong root!
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: requestedRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "does not match requested root", err)
require.Equal(t, 0, len(proofs))
})
t.Run("already_have proof - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{1}, // Already have proof_id 1
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "received proof we already have", err)
require.Equal(t, 0, len(proofs))
})
t.Run("invalid proof_id - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(255), // Invalid proof_id (max valid is 7)
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "invalid proof_id", err)
require.Equal(t, 0, len(proofs))
})
}

View File

@@ -23,7 +23,6 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -68,7 +67,6 @@ const (
seenProposerSlashingSize = 100
badBlockSize = 1000
syncMetricsInterval = 10 * time.Second
seenExecutionProofSize = 100
)
var (
@@ -96,7 +94,6 @@ type config struct {
slashingPool slashings.PoolManager
syncCommsPool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
execProofPool execproofs.PoolManager
chain blockchainService
initialSync Checker
blockNotifier blockfeed.Notifier
@@ -238,6 +235,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
r.subHandler = newSubTopicHandler()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()
return r
}

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"reflect"
"runtime/debug"
"slices"
"strconv"
"strings"
"sync"
"time"
@@ -14,11 +16,13 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -61,6 +65,15 @@ type subscribeParameters struct {
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
// but for which no subscriptions are needed.
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
partial *partialSubscribeParameters
}
type partialSubscribeParameters struct {
broadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
validateHeader partialdatacolumnbroadcaster.HeaderValidator
validate partialdatacolumnbroadcaster.ColumnValidator
handle partialdatacolumnbroadcaster.SubHandler
}
// shortTopic is a less verbose version of topic strings used for logging.
@@ -320,6 +333,35 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
// New gossip topic in Fulu.
if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
s.spawn(func() {
var ps *partialSubscribeParameters
broadcaster := s.cfg.p2p.PartialColumnBroadcaster()
if broadcaster != nil {
ps = &partialSubscribeParameters{
broadcaster: broadcaster,
validateHeader: func(header *ethpb.PartialDataColumnHeader) (bool, error) {
return s.validatePartialDataColumnHeader(context.TODO(), header)
},
validate: func(cellsToVerify []blocks.CellProofBundle) error {
return s.validateKZGProofs(context.TODO(), len(cellsToVerify), slices.Values(cellsToVerify))
},
handle: func(topic string, col blocks.VerifiedRODataColumn) {
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
defer cancel()
slot := col.SignedBlockHeader.Header.Slot
proposerIndex := col.SignedBlockHeader.Header.ProposerIndex
if !s.hasSeenDataColumnIndex(slot, proposerIndex, col.Index) {
s.setSeenDataColumnIndex(slot, proposerIndex, col.Index)
// This column was completed from a partial message.
partialMessageColumnCompletionsTotal.WithLabelValues(strconv.FormatUint(col.Index, 10)).Inc()
}
err := s.verifiedRODataColumnSubscriber(ctx, col)
if err != nil {
log.WithError(err).Error("Failed to handle verified RO data column subscriber")
}
},
}
}
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.DataColumnSubnetTopicFormat,
validate: s.validateDataColumn,
@@ -327,19 +369,9 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
nse: nse,
getSubnetsToJoin: s.dataColumnSubnetIndices,
getSubnetsRequiringPeers: s.allDataColumnSubnets,
partial: ps,
})
})
if features.Get().EnableZkvm {
s.spawn(func() {
s.subscribe(
p2p.ExecutionProofSubnetTopicFormat,
s.validateExecutionProof,
s.executionProofSubscriber,
nse,
)
})
}
}
return true
}
@@ -376,11 +408,10 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
// Impossible condition as it would mean topic does not exist.
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
}
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest)+s.cfg.p2p.Encoding().ProtocolSuffix(), validator, handle)
}
func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
log := log.WithField("topic", topic)
// Do not resubscribe already seen subscriptions.
@@ -543,7 +574,11 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
for _, subnet := range t.unwanted(wantedSubnets) {
t.cancelSubscription(subnet)
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
topic := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
if t.partial != nil {
_ = t.partial.broadcaster.Unsubscribe(topic)
}
s.unSubscribeFromTopic(topic)
}
}
@@ -590,9 +625,34 @@ func (s *Service) trySubscribeSubnets(t *subnetTracker) {
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneNotWanted(t, subnetsToJoin)
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
topicStr := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
topicOpts := make([]pubsub.TopicOpt, 0, 2)
requestPartial := t.partial != nil
if requestPartial {
// TODO: do we want the ability to support partial messages without requesting them?
topicOpts = append(topicOpts, pubsub.RequestPartialMessages())
}
topic, err := s.cfg.p2p.JoinTopic(topicStr, topicOpts...)
if err != nil {
log.WithError(err).Error("Failed to join topic")
return
}
if requestPartial {
log.Info("Subscribing to partial columns on", topicStr)
err = t.partial.broadcaster.Subscribe(topic, t.partial.validateHeader, t.partial.validate, t.partial.handle)
if err != nil {
log.WithError(err).Error("Failed to subscribe to partial column")
}
}
// We still need to subscribe to the full columns as well as partial in
// case our peers don't support partial messages.
t.track(subnet, s.subscribeWithBase(topicStr, t.validate, t.handle))
}
}
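Because the removed one-line subscription and the new multi-step flow are interleaved above, here is the same logic pulled into one hedged sketch. All identifiers come from the hunk (including `pubsub.RequestPartialMessages` from the updated go-libp2p-pubsub dependency); this is a condensed reading of the change, not the committed function.

```go
// joinColumnSubnetSketch joins one data-column subnet topic, optionally
// requesting partial messages, and keeps the regular full-column subscription
// for peers that do not support partial messages.
func (s *Service) joinColumnSubnetSketch(t *subnetTracker, subnet uint64) {
	topicStr := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())

	topicOpts := make([]pubsub.TopicOpt, 0, 1)
	if t.partial != nil {
		topicOpts = append(topicOpts, pubsub.RequestPartialMessages())
	}
	topic, err := s.cfg.p2p.JoinTopic(topicStr, topicOpts...)
	if err != nil {
		log.WithError(err).Error("Failed to join topic")
		return
	}
	if t.partial != nil {
		// Partial-column reception: header validation, KZG cell validation, and
		// the completion handler all come from partialSubscribeParameters.
		if err := t.partial.broadcaster.Subscribe(topic, t.partial.validateHeader, t.partial.validate, t.partial.handle); err != nil {
			log.WithError(err).Error("Failed to subscribe to partial column")
		}
	}
	// Full-column subscription stays in place alongside the partial path.
	t.track(subnet, s.subscribeWithBase(topicStr, t.validate, t.handle))
}
```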

View File

@@ -11,7 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +23,6 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
)
@@ -71,55 +70,12 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}
return err
}
// We use the service context to ensure this context is not cancelled
// when the current function returns.
// TODO: Do not broadcast proofs for blocks we have already seen.
go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
return errors.Wrap(err, "process pending atts for block")
}
return nil
}
func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
const delay = 2 * time.Second
proofTypes := flags.Get().ProofGenerationTypes
if len(proofTypes) == 0 {
return
}
var wg errgroup.Group
for _, proofType := range proofTypes {
wg.Go(func() error {
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
if err != nil {
return fmt.Errorf("generate exec proof: %w", err)
}
if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
return fmt.Errorf("broadcast exec proof: %w", err)
}
return nil
})
}
if err := wg.Wait(); err != nil {
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", roBlock.Root()),
"slot": roBlock.Block().Slot(),
"count": len(proofTypes),
}).Debug("Generated and broadcasted execution proofs")
}
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {
@@ -234,6 +190,10 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()
digest, err := s.currentForkDigest()
if err != nil {
return nil, err
}
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
@@ -259,11 +219,30 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
}
// Try to reconstruct data column constructedSidecars from the execution client.
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
constructedSidecars, partialColumns, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
if err != nil {
return nil, errors.Wrap(err, "reconstruct data column sidecars")
}
partialBroadcaster := s.cfg.p2p.PartialColumnBroadcaster()
if partialBroadcaster != nil {
log.WithField("len(partialColumns)", len(partialColumns)).Debug("Publishing partial columns")
for i := range uint64(len(partialColumns)) {
if !columnIndicesToSample[i] {
continue
}
subnet := peerdas.ComputeSubnetForDataColumnSidecar(i)
topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
// Publish the partial column. This is idempotent if we republish the same data twice.
// Note, the "partial column" may indeed be complete. We still
// should publish to help our peers.
err = partialBroadcaster.Publish(topic, partialColumns[i])
if err != nil {
log.WithError(err).Warn("Failed to publish partial column")
}
}
}
// No sidecars are retrieved from the EL, retry later
constructedSidecarCount = uint64(len(constructedSidecars))
if constructedSidecarCount == 0 {
@@ -330,7 +309,7 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
}
// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars, nil); err != nil {
return nil, errors.Wrap(err, "broadcast data column sidecars")
}
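For readers skimming the interleaved lines, the partial-column publishing added to `processDataColumnSidecarsFromExecution` earlier in this file boils down to the sketch below. The `[4]byte` digest type and the element type of `partialColumns` (written here as `*ethpb.PartialDataColumnSidecar`) are assumptions, since the diff only shows the call sites; the subnet computation, topic format, and `Publish` call are as in the hunk.

```go
// publishPartialColumnSketch publishes one reconstructed (possibly already
// complete) column for a sampled index. Publishing is idempotent, so
// republishing the same data is harmless.
func (s *Service) publishPartialColumnSketch(digest [4]byte, index uint64, col *ethpb.PartialDataColumnSidecar) {
	broadcaster := s.cfg.p2p.PartialColumnBroadcaster()
	if broadcaster == nil {
		return // partial messages not enabled
	}
	subnet := peerdas.ComputeSubnetForDataColumnSidecar(index)
	topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
	if err := broadcaster.Publish(topic, col); err != nil {
		log.WithError(err).Warn("Failed to publish partial column")
	}
}
```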

View File

@@ -3,6 +3,7 @@ package sync
import (
"context"
"fmt"
"strconv"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
@@ -24,6 +25,13 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
}
// Track useful full columns received via gossip (not previously seen)
slot := sidecar.SignedBlockHeader.Header.Slot
proposerIndex := sidecar.SignedBlockHeader.Header.ProposerIndex
if !s.hasSeenDataColumnIndex(slot, proposerIndex, sidecar.Index) {
usefulFullColumnsReceivedTotal.WithLabelValues(strconv.FormatUint(sidecar.Index, 10)).Inc()
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column sidecar")
}
@@ -51,6 +59,38 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return nil
}
func (s *Service) verifiedRODataColumnSubscriber(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
log.WithField("slot", sidecar.Slot()).WithField("column", sidecar.Index).Info("Received data column sidecar")
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column sidecar")
}
var wg errgroup.Group
wg.Go(func() error {
if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
return errors.Wrap(err, "process data column sidecars from reconstruction")
}
return nil
})
wg.Go(func() error {
// Broadcast our complete column for peers that don't use partial messages
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{sidecar}, nil); err != nil {
return errors.Wrap(err, "process data column sidecars from execution")
}
return nil
})
if err := wg.Wait(); err != nil {
return err
}
return nil
}
// receiveDataColumnSidecar receives a single data column sidecar: marks it as seen and saves it to the chain.
// Do not loop over this function to receive multiple sidecars, use receiveDataColumnSidecars instead.
func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {

View File

@@ -1,31 +0,0 @@
package sync
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
executionProof, ok := msg.(*ethpb.ExecutionProof)
if !ok {
return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.ExecutionProof{}, msg)
}
// Insert the execution proof into the pool
s.cfg.execProofPool.Insert(executionProof)
// Notify subscribers about the new execution proof
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.ExecutionProofReceived,
Data: &opfeed.ExecutionProofReceivedData{
ExecutionProof: executionProof,
},
})
return nil
}

Some files were not shown because too many files have changed in this diff.