Compare commits

..

9 Commits

Author SHA1 Message Date
Manu NALEPA
cfe59706af saveDataColumnSidecarsExistingFile: Parallelize SSZ encoding of data column sidecars (see the sketch after the commit list). 2025-12-28 23:21:52 +01:00
Manu NALEPA
b113d6bbde saveDataColumnSidecarsExistingFile: Rename sszEncodedDataColumnSidecars to sszEncodedDataColumnSidecarsBytes. 2025-12-28 23:21:52 +01:00
Manu NALEPA
45577ef931 saveDataColumnSidecarsNewFile: Parallelize SSZ encoding of data column sidecars. 2025-12-28 23:21:52 +01:00
Manu NALEPA
92865adfe7 saveDataColumnSidecarsNewFile: Rename sszEncodedDataColumnSidecars to sszEncodedDataColumnSidecarsBytes. 2025-12-28 23:21:52 +01:00
Manu NALEPA
a3863c118b processDataColumnSidecarsFromExecution: Return a context canceled error only if the context was canceled while some sidecars are still missing. 2025-12-19 16:05:09 +01:00
Manu NALEPA
730e6500e3 beaconBlockSubscriber: Wait for the end of processSidecarsFromExecutionFromBlock before returning.
If we return before waiting, the `ctx` used in `processSidecarsFromExecutionFromBlock` is canceled as well, stopping the blob retrieval from the EL early (see the sketch after the commit list).
2025-12-19 15:51:33 +01:00
Manu NALEPA
dac2c65004 dataColumnSubscriber: Improve error wrapping. 2025-12-19 15:51:28 +01:00
Manu NALEPA
dbfb987e1d processDataColumnSidecarsFromExecution: Use the parent context.
(There is no reason to stop retrying before the parent context ends.)
2025-12-19 15:51:17 +01:00
Manu NALEPA
3596d00ff9 Add the --disable-get-blobs-v2 flag. 2025-12-19 15:48:29 +01:00
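The four saveDataColumnSidecars* commits above rename the destination variable and parallelize the SSZ encoding of data column sidecars before they are written to disk. A minimal sketch of the pattern, assuming a fastssz-style MarshalSSZ method and golang.org/x/sync/errgroup; the helper names are illustrative, not Prysm's actual code:

package sidecarenc

import "golang.org/x/sync/errgroup"

// sszMarshaler stands in for the data column sidecar type, which exposes
// MarshalSSZ like other fastssz-generated types.
type sszMarshaler interface {
	MarshalSSZ() ([]byte, error)
}

// encodeSidecarsParallel SSZ-encodes every sidecar concurrently and returns
// the encoded bytes in input order. Each goroutine writes to a distinct
// index of the preallocated slice, so no extra locking is needed.
func encodeSidecarsParallel(sidecars []sszMarshaler) ([][]byte, error) {
	sszEncodedDataColumnSidecarsBytes := make([][]byte, len(sidecars))
	var g errgroup.Group
	for i, sidecar := range sidecars {
		i, sidecar := i, sidecar // loop-variable capture (pre-Go 1.22)
		g.Go(func() error {
			b, err := sidecar.MarshalSSZ()
			if err != nil {
				return err
			}
			sszEncodedDataColumnSidecarsBytes[i] = b
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return sszEncodedDataColumnSidecarsBytes, nil
}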
109 changed files with 945 additions and 4243 deletions
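Commits 730e6500e3 and dbfb987e1d above both concern context lifetimes: the subscriber must wait for processSidecarsFromExecutionFromBlock before returning (otherwise the ctx it derived is canceled and blob retrieval from the EL stops early), and retries should run until the parent context ends rather than a shorter one. A self-contained sketch of the pitfall and the fix, with hypothetical names standing in for the Prysm functions:

package main

import (
	"context"
	"fmt"
	"time"
)

// retrieveSidecarsFromEL is a stub standing in for the retrieval loop that
// keeps retrying against the execution layer until it succeeds or ctx ends.
func retrieveSidecarsFromEL(ctx context.Context) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return ctx.Err() // retrieval stopped early
	}
}

// subscriberSketch shows the pitfall fixed by 730e6500e3: deriving a context,
// launching the retrieval in a goroutine, and returning immediately fires the
// deferred cancel and cuts the retrieval short. Waiting on done is the fix.
func subscriberSketch(parent context.Context) error {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	done := make(chan error, 1)
	go func() { done <- retrieveSidecarsFromEL(ctx) }()

	// Wait before returning so the deferred cancel only fires afterwards.
	return <-done
}

func main() {
	if err := subscriberSketch(context.Background()); err != nil {
		fmt.Println("retrieval stopped early:", err)
	}
}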

View File

@@ -193,7 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

View File

@@ -48,7 +48,6 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -66,7 +65,6 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -148,8 +146,6 @@ go_test(
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

View File

@@ -323,17 +323,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we still need to compute the
// right proposer index pre-Fulu; either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
@@ -341,7 +338,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch && !fuluAndNextEpoch {
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {

View File

@@ -1053,3 +1053,40 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}
func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

View File

@@ -12,7 +12,6 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -54,53 +53,58 @@ type fcuConfig struct {
}
// sendFCU handles the logic to notify the engine of a forkchoice update
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
if fcuArgs.attributes.IsEmpty() {
return
}
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being

View File

@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
}
func TestShouldOverrideFCU(t *testing.T) {

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -14,7 +13,6 @@ import (
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -138,14 +136,6 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
}
// WithExecProofsPool to keep track of execution proofs.
func WithExecProofsPool(p execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.ExecProofsPool = p
return nil
}
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Accessor) Option {
return func(s *Service) error {
@@ -276,10 +266,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}

View File

@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,13 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}
@@ -45,7 +43,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
// If the head state alone is enough, we can return it directly read only.
if c.Epoch <= headEpoch {
if c.Epoch == headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil

View File

@@ -170,13 +170,12 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -198,13 +197,12 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -229,7 +227,6 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -238,9 +235,8 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service.head = &head{
root: [32]byte{'T'},
block: headBlock,
slot: 64,
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -267,7 +263,6 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -275,8 +270,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'U'},
block: headBlock,
root: [32]byte{'T'},
state: s,
slot: 64,
}
@@ -293,13 +287,12 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

View File

@@ -7,8 +7,6 @@ import (
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -17,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -69,6 +66,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}
if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -105,16 +105,12 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
s.sendFCU(cfg, fcuArgs)
// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
return nil
@@ -299,6 +295,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
@@ -326,7 +330,6 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}
// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -356,9 +359,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -371,7 +372,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -665,17 +666,10 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}
root, blockVersion := roBlock.Root(), roBlock.Version()
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
if err := s.areExecutionProofsAvailable(ctx, root); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}
return nil
return s.areDataColumnsAvailable(ctx, root, block)
}
if blockVersion >= version.Deneb {
@@ -685,67 +679,6 @@ func (s *Service) isDataAvailable(
return nil
}
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if proof generation is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
return nil
}
requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"requiredProofCount": requiredProofCount,
})
// Subscribe to execution proof received events.
eventsChan := make(chan *feed.Event, 1)
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
defer subscription.Unsubscribe()
// Return early if we already have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}
// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case event := <-eventsChan:
if event.Type != operation.ExecutionProofReceived {
continue
}
proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
if !ok {
log.Error("Could not cast event data to ExecutionProofReceivedData")
continue
}
proof := proofWrapper.ExecutionProof
// Skip if the proof is for a different block.
if bytesutil.ToBytes32(proof.BlockRoot) != blockRoot {
continue
}
// Return if we have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
@@ -979,6 +912,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -991,32 +926,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -1046,8 +963,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")

View File

@@ -42,8 +42,14 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
}
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -167,19 +173,26 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
log.WithError(err).Error("Could not update next slot state cache")
return
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return
return nil
}
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when we are proposing the next block and want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -189,6 +202,20 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}
// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
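The hunks above converge on one rule: payload attributes are computed synchronously only for blocks that arrive after the attestation voting window, while early blocks get an FCU with empty attributes first and handleSecondFCUCall fills the attributes in a background routine. A toy illustration of the window check, assuming mainnet timing (12-second slots, attestation deadline a third of the way in); it mirrors what slots.WithinVotingWindow checks but is not Prysm's implementation:

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot   = 12 // mainnet SECONDS_PER_SLOT (assumption)
	votingWindowSecs = 4  // attestation deadline: one third of the slot
)

// withinVotingWindow reports whether now is still inside the attestation
// voting window of the given slot. Blocks processed inside the window are
// "early": attribute computation can be deferred to a second FCU call.
func withinVotingWindow(genesis time.Time, slot uint64, now time.Time) bool {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	return now.Sub(slotStart) < votingWindowSecs*time.Second
}

func main() {
	genesis := time.Now().Add(-10 * time.Second) // slot 0 started 10s ago
	fmt.Println(withinVotingWindow(genesis, 0, time.Now())) // false: past the deadline
}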

View File

@@ -13,8 +13,6 @@ import (
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -740,9 +738,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -792,9 +788,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -822,9 +816,25 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -856,9 +866,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1331,9 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1345,9 +1351,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1359,9 +1363,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1373,9 +1375,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1400,6 +1400,197 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}
// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)
// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's head root is the previous head (since the invalid block did
// not finish importing) and the node is optimistic.
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}
// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
@@ -1451,9 +1642,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1473,9 +1662,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
@@ -1496,9 +1684,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1521,9 +1708,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1533,10 +1718,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1587,9 +1768,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1656,9 +1835,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1679,9 +1856,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1701,9 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1732,9 +1906,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1802,9 +1975,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1829,9 +2000,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1859,9 +2028,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1905,6 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
@@ -1921,9 +2089,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1943,9 +2109,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1965,9 +2130,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1998,9 +2161,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -2121,9 +2282,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2189,9 +2348,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2474,10 +2631,7 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -2578,9 +2732,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
@@ -2614,10 +2766,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, "context canceled", err)
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -3003,113 +3152,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NotNil(t, err)
})
t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)
// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
testParams := testIsAvailableParams{
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// Insert MinProofsRequired execution proofs into the pool
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
}
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)
// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
testParams := testIsAvailableParams{
options: []Option{
WithOperationNotifier(&mock.MockOperationNotifier{}),
},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// Goroutine to send execution proofs after data columns are available
go func() {
// Wait a bit to simulate async proof arrival
time.Sleep(50 * time.Millisecond)
// Send ExecutionProofReceived events
opfeed := service.cfg.OperationNotifier.OperationFeed()
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
opfeed.Send(&feed.Event{
Type: operation.ExecutionProofReceived,
Data: &operation.ExecutionProofReceivedData{
ExecutionProof: proof,
},
})
}
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
}
// Test_postBlockProcess_EventSending tests that block processed events are only sent
@@ -3220,9 +3262,7 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
}
// Execute postBlockProcess
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(cfg)
service.cfg.ForkChoiceStore.Unlock()
// Check error expectation
if tt.expectError {

View File

@@ -66,54 +66,52 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
continue
}
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
}()
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
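The net effect of the hunk above is that spawnProcessAttestationsRoutine now wraps its body in go func() { ... }() and owns its goroutine; as the node Start hunk later in this diff shows, the caller drops the go keyword and simply invokes it. A minimal sketch of this self-spawning pattern, assuming illustrative names (service and spawnRoutine are stand-ins, not Prysm APIs):

package main

import (
	"context"
	"fmt"
	"time"
)

type service struct{ ctx context.Context }

// spawnRoutine launches its own goroutine, so call sites cannot forget
// the go keyword and accidentally block on the long-running loop.
func (s *service) spawnRoutine() {
	go func() {
		for {
			select {
			case <-s.ctx.Done():
				return
			case <-time.After(time.Second):
				fmt.Println("tick")
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	s := &service{ctx: ctx}
	s.spawnRoutine() // plain call; the method spawns internally
	<-ctx.Done()
}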
@@ -158,15 +156,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -117,9 +117,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -179,9 +177,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -25,7 +24,6 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -87,11 +85,9 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
ExecProofsPool execproofs.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
OperationNotifier operation.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
@@ -215,9 +211,7 @@ func (s *Service) Start() {
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
go s.spawnProcessAttestationsRoutine()
go s.spawnFinalizedProofsPruningRoutine()
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
}
@@ -573,46 +567,3 @@ func fuluForkSlot() (primitives.Slot, error) {
return forkFuluSlot, nil
}
// spawnFinalizedProofsPruningRoutine prunes the execution proof pool every epoch.
// It removes proofs older than the finalized checkpoint to prevent unbounded
// memory growth.
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
func (s *Service) spawnFinalizedProofsPruningRoutine() {
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
defer ticker.Done()
for {
select {
case slot := <-ticker.C():
// Only prune at the start of each epoch
if !slots.IsEpochStart(slot) {
continue
}
finalizedCheckpoint := s.FinalizedCheckpt()
if finalizedCheckpoint == nil {
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
continue
}
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
log.WithError(err).Error("Could not get finalized slot")
continue
}
// Prune proofs older than finalized slot
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
log.WithFields(logrus.Fields{
"prunedCount": count,
"finalizedSlot": finalizedSlot,
}).Debug("Pruned finalized execution proofs")
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}

View File

@@ -290,3 +290,52 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}

View File

@@ -122,6 +122,24 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return nil
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex primitives.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureBatch(

View File

@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
if bal > 0 {
continue
}

View File

@@ -46,9 +46,6 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionProofReceived is sent after an execution proof object has been received from gossip or rpc.
ExecutionProofReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,11 +77,6 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *ethpb.ExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

View File

@@ -182,6 +182,12 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}
sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -195,7 +201,7 @@ func ProcessBlockNoVerifyAnySig(
// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(rSet).Join(aSet)
set.Join(bSet).Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

View File

@@ -157,8 +157,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
}
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {

View File

@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
if oldestSlotFlagPtr != nil {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).

View File

@@ -128,9 +128,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }
@@ -144,7 +141,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",
@@ -178,13 +174,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: &denebSlot,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
@@ -214,9 +210,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot
@@ -239,27 +232,16 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)
// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())
// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)

View File

@@ -186,162 +186,73 @@ func (dcs *DataColumnStorage) WarmCache() {
highestStoredEpoch := primitives.Epoch(0)
// List all period directories
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
if err != nil {
log.WithError(err).Error("Error reading top directory during warm cache")
return
}
// Iterate through periods
for _, periodFileInfo := range periodFileInfos {
if !periodFileInfo.IsDir() {
continue
// Walk the data column filesystem to warm up the cache.
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
if fileErr != nil {
return fileErr
}
periodPath := periodFileInfo.Name()
// If not a leaf, skip.
if info.IsDir() {
return nil
}
// List all epoch directories in this period
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
// Extract metadata from the file path.
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
continue
log.WithError(err).Error("Error encountered while extracting file metadata")
return nil
}
// Iterate through epochs
for _, epochFileInfo := range epochFileInfos {
if !epochFileInfo.IsDir() {
continue
}
epochPath := path.Join(periodPath, epochFileInfo.Name())
// List all .sszs files in this epoch
files, err := listEpochFiles(dcs.fs, epochPath)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
continue
}
if len(files) == 0 {
continue
}
// Process all files in this epoch in parallel
epochHighest, err := dcs.processEpochFiles(files)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
}
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
// Open the data column filesystem file.
f, err := dcs.fs.Open(path)
if err != nil {
log.WithError(err).Error("Error encountered while opening data column filesystem file")
return nil
}
// Close the file.
defer func() {
// Overwrite the existing error only if it is nil, since the close error is less important.
closeErr := f.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Read the metadata of the file.
metadata, err := dcs.metadata(f)
if err != nil {
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
return nil
}
// Check the indices.
indices := metadata.indices.all()
if len(indices) == 0 {
return nil
}
// Build the ident.
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
// Update the highest stored epoch.
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
// Set the ident in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
}
return nil
}); err != nil {
log.WithError(err).Error("Error encountered while walking data column filesystem.")
}
// Prune the cache and the filesystem
// Prune the cache and the filesystem.
dcs.prune()
totalElapsed := time.Since(start)
// Log summary
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
}
// listEpochFiles lists all .sszs files in an epoch directory.
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
fileInfos, err := afero.ReadDir(fs, epochPath)
if err != nil {
return nil, errors.Wrap(err, "read epoch directory")
}
files := make([]string, 0, len(fileInfos))
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}
fileName := fileInfo.Name()
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
files = append(files, path.Join(epochPath, fileName))
}
}
return files, nil
}
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
var (
eg errgroup.Group
mu sync.Mutex
)
highestEpoch := primitives.Epoch(0)
for _, filePath := range files {
eg.Go(func() error {
epoch, err := dcs.processFile(filePath)
if err != nil {
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
return nil
}
mu.Lock()
defer mu.Unlock()
highestEpoch = max(highestEpoch, epoch)
return nil
})
}
if err := eg.Wait(); err != nil {
return highestEpoch, err
}
return highestEpoch, nil
}
// processFile processes a single .sszs file.
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
// Extract metadata from the file path
fileMetadata, err := extractFileMetadata(filePath)
if err != nil {
return 0, errors.Wrap(err, "extract file metadata")
}
// Open the file (each goroutine gets its own FD)
f, err := dcs.fs.Open(filePath)
if err != nil {
return 0, errors.Wrap(err, "open file")
}
defer func() {
if closeErr := f.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
}
}()
// Read metadata
metadata, err := dcs.metadata(f)
if err != nil {
return 0, errors.Wrap(err, "read metadata")
}
// Extract indices
indices := metadata.indices.all()
if len(indices) == 0 {
return fileMetadata.epoch, nil // No indices, skip
}
// Build ident and set in cache (thread-safe)
dataColumnsIdent := DataColumnsIdent{
Root: fileMetadata.blockRoot,
Epoch: fileMetadata.epoch,
Indices: indices,
}
if err := dcs.cache.set(dataColumnsIdent); err != nil {
return 0, errors.Wrap(err, "cache set")
}
return fileMetadata.epoch, nil
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
}
// Summary returns the DataColumnStorageSummary.
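WarmCache now delegates directory traversal to afero.Walk, which behaves like filepath.Walk but runs over any afero.Fs, replacing the hand-rolled period/epoch iteration and the per-epoch worker pool. A tiny self-contained sketch of the walk pattern against an in-memory filesystem (the period/epoch/.sszs layout mirrors the conventions above; this is illustrative, not the Prysm storage code itself):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	// Mimic a period/epoch/file layout with a single leaf file.
	_ = fs.MkdirAll("0/12", 0o755)
	_ = afero.WriteFile(fs, "0/12/abcd.sszs", []byte{1, 2, 3}, 0o644)

	// Walk visits every entry; directories are skipped so that only
	// leaf files are processed, as in WarmCache above.
	err := afero.Walk(fs, ".", func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if info.IsDir() {
			return nil
		}
		fmt.Println("leaf file:", path)
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}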
@@ -715,7 +626,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
}
// Create the SSZ encoded data column sidecars.
var sszEncodedDataColumnSidecars []byte
var sszEncodedDataColumnSidecarsBytes []byte
// Initialize the count of the saved SSZ encoded data column sidecar.
storedCount := uint8(0)
@@ -726,7 +637,26 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
break
}
for _, dataColumnSidecar := range dataColumnSidecars {
var wg errgroup.Group
sszEncodedDataColumnSidecars := make([][]byte, len(dataColumnSidecars))
for i, dataColumnSidecar := range dataColumnSidecars {
wg.Go(func() error {
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
sszEncodedDataColumnSidecars[i] = sszEncodedDataColumnSidecar
return nil
})
}
if err := wg.Wait(); err != nil {
return err
}
for i, dataColumnSidecar := range dataColumnSidecars {
// Extract the data columns index.
dataColumnIndex := dataColumnSidecar.Index
@@ -748,10 +678,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
}
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
sszEncodedDataColumnSidecar := sszEncodedDataColumnSidecars[i]
// Compute the size of the SSZ encoded data column sidecar.
incomingSszEncodedDataColumnSidecarSize := uint32(len(sszEncodedDataColumnSidecar))
@@ -770,7 +697,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
storedCount++
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
sszEncodedDataColumnSidecarsBytes = append(sszEncodedDataColumnSidecarsBytes, sszEncodedDataColumnSidecar...)
}
}
@@ -785,11 +712,11 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
}
// Append the SSZ encoded data column sidecars to the end of the file.
count, err = file.WriteAt(sszEncodedDataColumnSidecars, metadata.fileSize)
count, err = file.WriteAt(sszEncodedDataColumnSidecarsBytes, metadata.fileSize)
if err != nil {
return errors.Wrap(err, "write SSZ encoded data column sidecars")
}
if count != len(sszEncodedDataColumnSidecars) {
if count != len(sszEncodedDataColumnSidecarsBytes) {
return errWrongBytesWritten
}
@@ -811,7 +738,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
var (
sszEncodedDataColumnSidecarRefSize int
sszEncodedDataColumnSidecars []byte
sszEncodedDataColumnSidecarsBytes []byte
)
// Initialize the count of the saved SSZ encoded data column sidecar.
@@ -823,7 +750,26 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
break
}
for _, dataColumnSidecar := range dataColumnSidecars {
var wg errgroup.Group
sszEncodedDataColumnSidecars := make([][]byte, len(dataColumnSidecars))
for i, dataColumnSidecar := range dataColumnSidecars {
wg.Go(func() error {
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
sszEncodedDataColumnSidecars[i] = sszEncodedDataColumnSidecar
return nil
})
}
if err := wg.Wait(); err != nil {
return err
}
for i, dataColumnSidecar := range dataColumnSidecars {
// Extract the data column index.
dataColumnIndex := dataColumnSidecar.Index
@@ -846,10 +792,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
storedCount++
// SSZ encode the first data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
sszEncodedDataColumnSidecar := sszEncodedDataColumnSidecars[i]
// Check if the size of the SSZ encoded data column sidecar is correct.
if sszEncodedDataColumnSidecarRefSize != 0 && len(sszEncodedDataColumnSidecar) != sszEncodedDataColumnSidecarRefSize {
@@ -860,7 +803,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
sszEncodedDataColumnSidecarRefSize = len(sszEncodedDataColumnSidecar)
// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
sszEncodedDataColumnSidecarsBytes = append(sszEncodedDataColumnSidecarsBytes, sszEncodedDataColumnSidecar...)
}
}
@@ -897,12 +840,12 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
rawIndices := indices.raw()
// Concatenate the version, the data column sidecar size, the data column indices and the SSZ encoded data column sidecar.
countToWrite := headerSize + len(sszEncodedDataColumnSidecars)
countToWrite := headerSize + len(sszEncodedDataColumnSidecarsBytes)
bytes := make([]byte, 0, countToWrite)
bytes = append(bytes, byte(version))
bytes = append(bytes, encodedSszEncodedDataColumnSidecarSize[:]...)
bytes = append(bytes, rawIndices[:]...)
bytes = append(bytes, sszEncodedDataColumnSidecars...)
bytes = append(bytes, sszEncodedDataColumnSidecarsBytes...)
countWritten, err := file.Write(bytes)
if err != nil {
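Both save paths above now use the same fan-out: the SSZ encodings land in a pre-sized [][]byte addressed by loop index, so goroutines never share a slot and no mutex is needed, and errgroup.Wait surfaces the first MarshalSSZ failure before any bytes are appended. A minimal standalone sketch of the pattern, with encode standing in for MarshalSSZ (illustrative names, not Prysm APIs; the per-iteration i/in captures assume Go 1.22+ loop-variable semantics):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// encode stands in for an expensive, fallible step such as MarshalSSZ.
func encode(in string) ([]byte, error) {
	if in == "" {
		return nil, errors.New("empty input")
	}
	return []byte(in), nil
}

// encodeAll encodes every input concurrently. Each goroutine writes only
// to its own index of the pre-sized result slice, so no locking is needed.
func encodeAll(inputs []string) ([][]byte, error) {
	var wg errgroup.Group
	encoded := make([][]byte, len(inputs))
	for i, in := range inputs {
		wg.Go(func() error {
			out, err := encode(in)
			if err != nil {
				return err
			}
			encoded[i] = out
			return nil
		})
	}
	// Wait blocks until all goroutines finish and returns the first error.
	if err := wg.Wait(); err != nil {
		return nil, err
	}
	return encoded, nil
}

func main() {
	out, err := encodeAll([]string{"a", "b", "c"})
	fmt.Println(out, err)
}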

View File

@@ -40,6 +40,7 @@ go_library(
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -538,6 +539,10 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
return nil, fmt.Errorf("%s is not supported", GetBlobsV2)
}
if flags.Get().DisableGetBlobsV2 {
return []*pb.BlobAndProofV2{}, nil
}
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)

View File

@@ -37,7 +37,6 @@ go_library(
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",

View File

@@ -40,7 +40,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/node/registration"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -103,7 +102,6 @@ type BeaconNode struct {
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
execProofsPool execproofs.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
@@ -158,7 +156,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slashingsPool: slashings.NewPool(),
syncCommitteePool: synccommittee.NewPool(),
blsToExecPool: blstoexec.NewPool(),
execProofsPool: execproofs.NewPool(),
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
payloadIDCache: cache.NewPayloadIDCache(),
slasherBlockHeadersFeed: new(event.Feed),
@@ -740,7 +737,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithBLSToExecPool(b.blsToExecPool),
blockchain.WithExecProofsPool(b.execProofsPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithAttestationService(attService),
@@ -756,7 +752,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
blockchain.WithOperationNotifier(b),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -832,7 +827,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithSlashingPool(b.slashingsPool),
regularsync.WithSyncCommsPool(b.syncCommitteePool),
regularsync.WithBlsToExecPool(b.blsToExecPool),
regularsync.WithExecProofPool(b.execProofsPool),
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),

View File

@@ -1,16 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pool.go"],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
],
)

View File

@@ -1,174 +0,0 @@
package execproofs
import (
"fmt"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// ProofKey uniquely identifies an execution proof by block root and proof type.
type ProofKey struct {
Root [fieldparams.RootLength]byte
ProofId primitives.ExecutionProofId
}
// String returns a string representation for logging.
func (k ProofKey) String() string {
return fmt.Sprintf("root=%#x,proofId=%d", k.Root, k.ProofId)
}
var (
execProofInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
Name: "exec_proof_pool_total",
Help: "The number of execution proofs in the operation pool.",
})
)
var _ PoolManager = (*ExecProofPool)(nil)
// PoolManager maintains execution proofs received via gossip.
// These proofs are used for data availability checks when importing blocks.
// Lightweight verifier nodes need a minimum number of proofs from different zkVM types
// to verify block execution correctness.
type PoolManager interface {
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
Insert(executionProof *ethpb.ExecutionProof)
// Get returns a copy of all proofs for a specific block root
Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof
// Ids returns the list of (unique) proof types available for a specific block root
Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId
// Count counts the number of proofs for a specific block root
Count(blockRoot [fieldparams.RootLength]byte) uint64
// Exists checks if a proof exists for the given block root and proof ID
Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool
// PruneUpTo removes proofs older than the target slot
PruneUpTo(targetSlot primitives.Slot) int
}
// ExecProofPool is a concrete implementation of the PoolManager interface.
type ExecProofPool struct {
lock sync.RWMutex
m map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof
}
// NewPool returns an initialized pool.
func NewPool() *ExecProofPool {
return &ExecProofPool{
m: make(map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof),
}
}
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
func (p *ExecProofPool) Insert(proof *ethpb.ExecutionProof) {
p.lock.Lock()
defer p.lock.Unlock()
blockRoot := bytesutil.ToBytes32(proof.BlockRoot)
// Create the inner map if it doesn't exist
if p.m[blockRoot] == nil {
p.m[blockRoot] = make(map[primitives.ExecutionProofId]*ethpb.ExecutionProof)
}
// Check if proof already exists
if _, exists := p.m[blockRoot][proof.ProofId]; exists {
return
}
// Insert new proof
p.m[blockRoot][proof.ProofId] = proof
execProofInPoolTotal.Inc()
}
// Get returns a copy of all proofs for a specific block root
func (p *ExecProofPool) Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof {
p.lock.RLock()
defer p.lock.RUnlock()
proofsByType, exists := p.m[blockRoot]
if !exists {
return nil
}
result := make([]*ethpb.ExecutionProof, 0, len(proofsByType))
for _, proof := range proofsByType {
result = append(result, proof.Copy())
}
return result
}
func (p *ExecProofPool) Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId {
p.lock.RLock()
defer p.lock.RUnlock()
proofById, exists := p.m[blockRoot]
if !exists {
return nil
}
ids := make([]primitives.ExecutionProofId, 0, len(proofById))
for id := range proofById {
ids = append(ids, id)
}
return ids
}
// Count counts the number of proofs for a specific block root
func (p *ExecProofPool) Count(blockRoot [fieldparams.RootLength]byte) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
return uint64(len(p.m[blockRoot]))
}
// Exists checks if a proof exists for the given block root and proof ID
func (p *ExecProofPool) Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool {
p.lock.RLock()
defer p.lock.RUnlock()
proofsByType, exists := p.m[blockRoot]
if !exists {
return false
}
_, exists = proofsByType[proofId]
return exists
}
// PruneUpTo removes proofs older than the given slot
func (p *ExecProofPool) PruneUpTo(targetSlot primitives.Slot) int {
p.lock.Lock()
defer p.lock.Unlock()
pruned := 0
for blockRoot, proofsByType := range p.m {
for proofId, proof := range proofsByType {
if proof.Slot < targetSlot {
delete(proofsByType, proofId)
execProofInPoolTotal.Dec()
pruned++
}
}
// Clean up empty inner maps
if len(proofsByType) == 0 {
delete(p.m, blockRoot)
}
}
return pruned
}

View File

@@ -165,7 +165,6 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -589,11 +589,6 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

View File

@@ -25,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -244,19 +243,12 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -281,15 +273,6 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}
// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -365,14 +348,6 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)
// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}
})
}
}

View File

@@ -52,9 +52,6 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -148,8 +145,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -515,28 +510,6 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: executionProofWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,

View File

@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -602,33 +602,6 @@ func (p *Status) All() []peer.ID {
return pids
}
// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState != Connected {
continue
}
if peerData.Enr == nil {
continue
}
var enabled bool
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
if err := peerData.Enr.Load(entry); err != nil {
continue
}
if enabled {
peers = append(peers, pid)
}
}
return peers
}
// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
p.store.Lock()

View File

@@ -1341,75 +1341,3 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
p.SetConnectionState(id, state)
return id
}
func TestZkvmEnabledPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 1,
},
},
})
// Create peer 1: Connected, zkVM enabled
pid1 := addPeer(t, p, peers.Connected)
record1 := new(enr.Record)
zkvmEnabled := true
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record1, pid1, nil, network.DirOutbound)
p.SetConnectionState(pid1, peers.Connected)
// Create peer 2: Connected, zkVM disabled
pid2 := addPeer(t, p, peers.Connected)
record2 := new(enr.Record)
zkvmDisabled := false
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
p.Add(record2, pid2, nil, network.DirOutbound)
p.SetConnectionState(pid2, peers.Connected)
// Create peer 3: Connected, zkVM enabled
pid3 := addPeer(t, p, peers.Connected)
record3 := new(enr.Record)
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record3, pid3, nil, network.DirOutbound)
p.SetConnectionState(pid3, peers.Connected)
// Create peer 4: Disconnected, zkVM enabled (should not be included)
pid4 := addPeer(t, p, peers.Disconnected)
record4 := new(enr.Record)
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record4, pid4, nil, network.DirOutbound)
p.SetConnectionState(pid4, peers.Disconnected)
// Create peer 5: Connected, no ENR (should not be included)
pid5 := addPeer(t, p, peers.Connected)
p.Add(nil, pid5, nil, network.DirOutbound)
p.SetConnectionState(pid5, peers.Connected)
// Create peer 6: Connected, no zkVM key in ENR (should not be included)
pid6 := addPeer(t, p, peers.Connected)
record6 := new(enr.Record)
record6.Set(enr.WithEntry("other_key", "other_value"))
p.Add(record6, pid6, nil, network.DirOutbound)
p.SetConnectionState(pid6, peers.Connected)
// Get zkVM enabled peers
zkvmPeers := p.ZkvmEnabledPeers()
// Should return only pid1 and pid3 (connected peers with zkVM enabled)
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
// Verify the returned peers are correct
zkvmPeerMap := make(map[peer.ID]bool)
for _, pid := range zkvmPeers {
zkvmPeerMap[pid] = true
}
assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

View File

@@ -67,9 +67,6 @@ const (
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
ExecutionProofsByRootName = "/execution_proofs_by_root"
)
const (
@@ -109,9 +106,6 @@ const (
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -176,9 +170,6 @@ var (
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
// ExecutionProofsByRoot v1 Message
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}
// Maps all registered protocol prefixes.
@@ -202,7 +193,6 @@ var (
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
ExecutionProofsByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.

View File

@@ -36,7 +36,6 @@ var (
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
)
// The value used with the subnet, in order

View File

@@ -46,8 +46,6 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipExecutionProofMessage is the name for the execution proof message type.
GossipExecutionProofMessage = "execution_proof"
// Topic Formats
//
@@ -77,8 +75,6 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (the PoC only has one global topic)
)
// topic is a struct representing a single gossipsub topic.
@@ -162,7 +158,6 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(fulu, future, empty, GossipExecutionProofMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
@@ -958,13 +957,6 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")

View File

@@ -40,7 +40,6 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {

View File

@@ -169,11 +169,6 @@ func TestGetSpec(t *testing.T) {
config.BlobsidecarSubnetCountElectra = 102
config.SyncMessageDueBPS = 103
// EIP-8025
config.MaxProofDataBytes = 200
config.MinEpochsForExecutionProofRequests = 201
config.MinProofsRequired = 202
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
config.DomainBeaconProposer = dbp
@@ -210,7 +205,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 178, len(data))
assert.Equal(t, 175, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -582,12 +577,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "103", v)
case "MAX_PROOF_DATA_BYTES":
assert.Equal(t, "200", v)
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
assert.Equal(t, "201", v)
case "MIN_PROOFS_REQUIRED":
assert.Equal(t, "202", v)
case "BLOB_SCHEDULE":
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)

View File

@@ -14,7 +14,6 @@ go_library(
"decode_pubsub.go",
"doc.go",
"error.go",
"exec_proofs.go",
"fork_watcher.go",
"fuzz_exports.go", # keep
"log.go",
@@ -32,7 +31,6 @@ go_library(
"rpc_chunked_response.go",
"rpc_data_column_sidecars_by_range.go",
"rpc_data_column_sidecars_by_root.go",
"rpc_execution_proofs_by_root_topic.go",
"rpc_goodbye.go",
"rpc_light_client.go",
"rpc_metadata.go",
@@ -48,7 +46,6 @@ go_library(
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_execution_proofs.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
@@ -60,7 +57,6 @@ go_library(
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_execution_proof.go",
"validate_light_client.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
@@ -97,7 +93,6 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
@@ -192,7 +187,6 @@ go_test(
"rpc_blob_sidecars_by_root_test.go",
"rpc_data_column_sidecars_by_range_test.go",
"rpc_data_column_sidecars_by_root_test.go",
"rpc_execution_proofs_by_root_topic_test.go",
"rpc_goodbye_test.go",
"rpc_handler_test.go",
"rpc_light_client_test.go",
@@ -217,7 +211,6 @@ go_test(
"validate_blob_test.go",
"validate_bls_to_execution_change_test.go",
"validate_data_column_test.go",
"validate_execution_proof_test.go",
"validate_light_client_test.go",
"validate_proposer_slashing_test.go",
"validate_sync_committee_message_test.go",
@@ -251,7 +244,6 @@ go_test(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/p2p:go_default_library",

View File

@@ -1,65 +0,0 @@
package sync
import (
"fmt"
"time"
"errors"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// generateExecProof returns a dummy execution proof after the specified delay.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
// Simulate proof generation work
time.Sleep(delay)
// Create a dummy proof with some deterministic data
block := roBlock.Block()
if block == nil {
return nil, errors.New("nil block")
}
body := block.Body()
if body == nil {
return nil, errors.New("nil block body")
}
executionData, err := body.Execution()
if err != nil {
return nil, fmt.Errorf("execution: %w", err)
}
if executionData == nil {
return nil, errors.New("nil execution data")
}
hash, err := executionData.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("hash tree root: %w", err)
}
proofData := []byte{
0xFF, // Magic byte for dummy proof
byte(proofID),
// Include some payload hash bytes
hash[0],
hash[1],
hash[2],
hash[3],
}
blockRoot := roBlock.Root()
proof := &ethpb.ExecutionProof{
ProofId: proofID,
Slot: block.Slot(),
BlockHash: hash[:],
BlockRoot: blockRoot[:],
ProofData: proofData,
}
return proof, nil
}
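For reference, the dummy proof body assembled above is a fixed six-byte layout: the 0xFF magic byte, the proof ID, then the first four bytes of the execution payload's hash tree root. A minimal, self-contained sketch of a decoder for that layout (decodeDummyProof is hypothetical, for illustration only; the real codebase never needs to parse these bytes back):

package main

import (
    "errors"
    "fmt"
)

// decodeDummyProof unpacks the six-byte dummy layout written by generateExecProof:
// [0xFF magic][proof ID][first 4 bytes of the payload hash tree root].
func decodeDummyProof(data []byte) (proofID byte, hashPrefix [4]byte, err error) {
    if len(data) != 6 {
        return 0, hashPrefix, errors.New("dummy proof must be exactly 6 bytes")
    }
    if data[0] != 0xFF {
        return 0, hashPrefix, errors.New("missing 0xFF magic byte")
    }
    proofID = data[1]
    copy(hashPrefix[:], data[2:6])
    return proofID, hashPrefix, nil
}

func main() {
    id, prefix, err := decodeDummyProof([]byte{0xFF, 0x01, 0xAA, 0xBB, 0xCC, 0xDD})
    if err != nil {
        panic(err)
    }
    fmt.Printf("proof id %d, payload hash prefix %#x\n", id, prefix)
}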

View File

@@ -12,7 +12,6 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -89,13 +88,6 @@ func WithBlsToExecPool(blsToExecPool blstoexec.PoolManager) Option {
}
}
func WithExecProofPool(execProofPool execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.execProofPool = execProofPool
return nil
}
}
func WithChainService(chain blockchainService) Option {
return func(s *Service) error {
s.cfg.chain = chain

View File

@@ -259,10 +259,6 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
return errors.Wrap(err, "request and save missing data column sidecars")
}
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return nil
}

View File

@@ -100,10 +100,6 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
// DataColumnSidecarsByRangeV1
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
// ExecutionProofsByRootV1
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
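Dropping the executionProofs collector removes one topic from the limiter map, which is exactly what the TestNewRateLimiter update below (21 down to 20 registered topics) asserts.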

View File

@@ -17,7 +17,7 @@ import (
func TestNewRateLimiter(t *testing.T) {
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
}
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

View File

@@ -51,7 +51,6 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
}, nil
}

View File

@@ -11,13 +11,11 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -89,77 +87,9 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
return errors.Wrap(err, "request and save missing data columns")
}
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return err
}
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
if len(blks) == 0 {
return nil
}
// TODO: Parallelize requests for multiple blocks.
for _, blk := range blks {
if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
return err
}
}
return nil
}
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
if !features.Get().EnableZkvm {
return nil
}
// Check proof retention period.
blockEpoch := slots.ToEpoch(block.Block().Slot())
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
return nil
}
// Check how many proofs are still needed, given what the execution proof pool already holds.
storedIds := s.cfg.execProofPool.Ids(block.Root())
count := uint64(len(storedIds))
if count >= params.BeaconConfig().MinProofsRequired {
return nil
}
// Construct request
blockRoot := block.Root()
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: params.BeaconConfig().MinProofsRequired - count,
AlreadyHave: storedIds,
}
// Call SendExecutionProofsByRootRequest.
zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
if len(zkvmEnabledPeers) == 0 {
return fmt.Errorf("no zkVM enabled peers available to request execution proofs")
}
// TODO: For simplicity, just pick the first peer for now.
// In the future, we can implement better peer selection logic.
pid := zkvmEnabledPeers[0]
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
if err != nil {
return fmt.Errorf("send execution proofs by root request: %w", err)
}
// Insert the fetched proofs into the execution proof pool.
// TODO: Implement multiple proof insertion in ExecProofPool to avoid multiple locks.
for _, proof := range proofs {
s.cfg.execProofPool.Insert(proof)
}
return nil
}
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

View File

@@ -182,21 +182,3 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return nil
}
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
// Success response code.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return errors.Wrap(err, "stream write")
}
ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return errors.Wrap(err, "write context to stream")
}
// Execution proof.
if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
return errors.Wrap(err, "encode with max length")
}
return nil
}
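Read together with ReadChunkedExecutionProof below, this fixes the per-chunk wire layout: one response code byte, four context bytes carrying the fork digest for the proof's slot, then the length-prefixed SSZ-snappy encoding of the proof itself.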

View File

@@ -1,219 +0,0 @@
package sync
import (
"context"
"errors"
"fmt"
"io"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// SendExecutionProofsByRootRequest sends an ExecutionProofsByRoot request and returns the fetched execution proofs, if any.
func SendExecutionProofsByRootRequest(
ctx context.Context,
clock blockchain.TemporalOracle,
p2pProvider p2p.P2P,
pid peer.ID,
req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
// Validate request
if req.CountNeeded == 0 {
return nil, errors.New("count_needed must be greater than 0")
}
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"topic": topic,
"block_root": bytesutil.ToBytes32(req.BlockRoot),
"count": req.CountNeeded,
"already": len(req.AlreadyHave),
}).Debug("Sending execution proofs by root request")
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)
// Read execution proofs from stream
proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
for _, id := range req.AlreadyHave {
alreadyHaveSet[id] = struct{}{}
}
for i := uint64(0); i < req.CountNeeded; i++ {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
// Validate proof
if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
return nil, err
}
proofs = append(proofs, proof)
}
return proofs, nil
}
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
stream libp2pcore.Stream,
encoding p2p.EncodingProvider,
isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
// Read status code for each chunk (like data columns, not like blocks)
code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
if err != nil {
return nil, err
}
if code != 0 {
return nil, errors.New(errMsg)
}
// Read context bytes (fork digest)
_, err = readContextFromStream(stream)
if err != nil {
return nil, fmt.Errorf("read context from stream: %w", err)
}
// Decode the proof
proof := &ethpb.ExecutionProof{}
if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
return nil, err
}
return proof, nil
}
// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
proof *ethpb.ExecutionProof,
req *ethpb.ExecutionProofsByRootRequest,
alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
// Check block root matches
proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
reqRoot := bytesutil.ToBytes32(req.BlockRoot)
if proofRoot != reqRoot {
return fmt.Errorf("proof block root %#x does not match requested root %#x",
proofRoot, reqRoot)
}
// Check we didn't already have this proof
if _, ok := alreadyHaveSet[proof.ProofId]; ok {
return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
}
// Check proof ID is valid (within max range)
if !proof.ProofId.IsValid() {
return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
}
return nil
}
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
defer span.End()
_, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
if !ok {
return errors.New("message is not type ExecutionProofsByRootRequest")
}
remotePeer := stream.Conn().RemotePeer()
SetRPCStreamDeadlines(stream)
// Validate request
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
}
// Penalize peers that send invalid requests.
if err := validateExecutionProofsByRootRequest(req); err != nil {
s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return fmt.Errorf("validate execution proofs by root request: %w", err)
}
blockRoot := bytesutil.ToBytes32(req.BlockRoot)
log := log.WithFields(logrus.Fields{
"blockroot": fmt.Sprintf("%#x", blockRoot),
"neededCount": req.CountNeeded,
"alreadyHave": req.AlreadyHave,
"peer": remotePeer.String(),
})
s.rateLimiter.add(stream, 1)
defer closeStream(stream, log)
// Get proofs from execution proof pool
storedProofs := s.cfg.execProofPool.Get(blockRoot)
// Build a set of proof IDs the requester already has.
alreadyHave := make(map[primitives.ExecutionProofId]bool)
for _, id := range req.AlreadyHave {
alreadyHave[id] = true
}
// Send proofs
sentCount := uint64(0)
for _, proof := range storedProofs {
if sentCount >= req.CountNeeded {
break
}
// Skip proofs the requester already has
if alreadyHave[proof.ProofId] {
continue
}
// Write proof to stream
SetStreamWriteDeadline(stream, defaultWriteDuration)
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
log.WithError(err).Debug("Could not send execution proof")
s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
return err
}
sentCount++
}
log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")
return nil
}
func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
if req.CountNeeded == 0 {
return errors.New("count_needed must be greater than 0")
}
return nil
}

View File

@@ -1,727 +0,0 @@
package sync
import (
"io"
"sync"
"testing"
"time"
chainMock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/pkg/errors"
)
func TestExecutionProofsByRootRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.executionProofsByRootRPCHandler(t.Context(), nil, nil)
require.ErrorContains(t, "message is not type ExecutionProofsByRootRequest", err)
})
t.Run("invalid request - count_needed is 0", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
service := &Service{cfg: &config{p2p: localP2P}, rateLimiter: newRateLimiter(localP2P)}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
code, errMsg, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
require.NoError(t, err)
require.Equal(t, responseCodeInvalidRequest, code)
require.Equal(t, "count_needed must be greater than 0", errMsg)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 0, // Invalid: must be > 0
AlreadyHave: []primitives.ExecutionProofId{},
}
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NotNil(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("zkVM disabled - returns empty", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: false, // Disabled
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
execProofPool := execproofs.NewPool()
service := &Service{
cfg: &config{
p2p: localP2P,
execProofPool: execProofPool,
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
// Should receive no proofs (stream should end)
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
require.ErrorIs(t, err, io.EOF)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("no proofs available", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
execProofPool := execproofs.NewPool()
service := &Service{
cfg: &config{
p2p: localP2P,
execProofPool: execProofPool,
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
// Should receive no proofs (stream should end)
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
require.ErrorIs(t, err, io.EOF)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("nominal - returns requested proofs", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with some proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
// Add 3 proofs for the same block
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
proof3 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(3),
ProofData: []byte("proof3"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
execProofPool.Insert(proof3)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 2)
for i := range 2 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
assert.Equal(t, 2, len(proofs))
// Should receive proof1 and proof2 (first 2 in pool)
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("filters already_have proofs", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with some proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
// Add 4 proofs for the same block
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
proof3 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(3),
ProofData: []byte("proof3"),
}
proof4 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(4),
ProofData: []byte("proof4"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
execProofPool.Insert(proof3)
execProofPool.Insert(proof4)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 2)
for i := range 3 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
// Should skip proof1 and proof2 (already_have), and return proof3 and proof4
assert.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(3), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(4), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{1, 2}, // Already have proof1 and proof2
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("partial send - less proofs than requested", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with only 2 proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 5)
for i := range 5 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
// Should only receive 2 proofs (not 5 as requested)
assert.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 5, // Request 5 but only 2 available
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
}
func TestValidateExecutionProofsByRootRequest(t *testing.T) {
t.Run("invalid - count_needed is 0", func(t *testing.T) {
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
CountNeeded: 0,
AlreadyHave: []primitives.ExecutionProofId{},
}
err := validateExecutionProofsByRootRequest(req)
require.ErrorContains(t, "count_needed must be greater than 0", err)
})
t.Run("valid", func(t *testing.T) {
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err := validateExecutionProofsByRootRequest(req)
require.NoError(t, err)
})
}
func TestSendExecutionProofsByRootRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
t.Run("count_needed is 0 - returns error", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 0,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "count_needed must be greater than 0", err)
require.Equal(t, 0, len(proofs))
})
t.Run("success - receives requested proofs", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
// Create proofs to send back
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
// Setup remote to send proofs
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
// The incoming request is intentionally not read or validated in this test.
_ = &ethpb.ExecutionProofsByRootRequest{}
// Send proof1
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
// Send proof2
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof2))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.NoError(t, err)
require.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
})
t.Run("partial response - EOF before count_needed", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
// Setup remote to send only 1 proof (but we request 5)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
// Send only proof1
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 5, // Request 5 but only get 1
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.NoError(t, err)
require.Equal(t, 1, len(proofs)) // Only received 1
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
})
t.Run("invalid block root - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
requestedRoot := [32]byte{0x01, 0x02, 0x03}
wrongRoot := [32]byte{0xFF, 0xFF, 0xFF}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
// Create proof with wrong block root
proof1 := &ethpb.ExecutionProof{
BlockRoot: wrongRoot[:], // Wrong root!
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: requestedRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "does not match requested root", err)
require.Equal(t, 0, len(proofs))
})
t.Run("already_have proof - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{1}, // Already have proof_id 1
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "received proof we already have", err)
require.Equal(t, 0, len(proofs))
})
t.Run("invalid proof_id - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(255), // Invalid proof_id (max valid is 7)
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "invalid proof_id", err)
require.Equal(t, 0, len(proofs))
})
}

View File

@@ -23,7 +23,6 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -68,7 +67,6 @@ const (
seenProposerSlashingSize = 100
badBlockSize = 1000
syncMetricsInterval = 10 * time.Second
seenExecutionProofSize = 100
)
var (
@@ -96,7 +94,6 @@ type config struct {
slashingPool slashings.PoolManager
syncCommsPool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
execProofPool execproofs.PoolManager
chain blockchainService
initialSync Checker
blockNotifier blockfeed.Notifier
@@ -238,6 +235,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
r.subHandler = newSubTopicHandler()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()
return r
}

View File

@@ -329,17 +329,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
getSubnetsRequiringPeers: s.allDataColumnSubnets,
})
})
if features.Get().EnableZkvm {
s.spawn(func() {
s.subscribe(
p2p.ExecutionProofSubnetTopicFormat,
s.validateExecutionProof,
s.executionProofSubscriber,
nse,
)
})
}
}
return true
}

View File

@@ -5,13 +5,13 @@ import (
"fmt"
"os"
"path"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +23,6 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
)
@@ -50,7 +49,15 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return errors.Wrap(err, "new ro block with root")
}
go s.processSidecarsFromExecutionFromBlock(ctx, roBlock)
var wg sync.WaitGroup
wg.Go(func() {
if err := s.processSidecarsFromExecutionFromBlock(ctx, roBlock); err != nil {
log.WithError(err).WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": block.Slot(),
}).Error("Failed to process sidecars from execution from block")
}
})
if err := s.cfg.chain.ReceiveBlock(ctx, signed, root, nil); err != nil {
if blockchain.IsInvalidBlock(err) {
@@ -72,64 +79,32 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return err
}
// We use the service context to ensure this context is not cancelled
// when the current function returns.
// TODO: Do not broadcast proofs for blocks we have already seen.
go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
return errors.Wrap(err, "process pending atts for block")
}
wg.Wait()
return nil
}
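A minimal, self-contained sketch of the pattern introduced above (it assumes Go 1.25's sync.WaitGroup.Go, which the new code also uses): by waiting on the group before returning, the handler guarantees that work started with its ctx is not cancelled mid-flight.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    var wg sync.WaitGroup
    wg.Go(func() {
        // Stand-in for processSidecarsFromExecutionFromBlock: work that the
        // caller must not cut short by returning (and cancelling ctx) early.
        select {
        case <-time.After(50 * time.Millisecond):
            fmt.Println("sidecar retrieval finished")
        case <-ctx.Done():
            fmt.Println("retrieval aborted early:", ctx.Err())
        }
    })
    wg.Wait() // wait first; only then is it safe to let ctx be cancelled
    cancel()
}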
func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
const delay = 2 * time.Second
proofTypes := flags.Get().ProofGenerationTypes
if len(proofTypes) == 0 {
return
}
var wg errgroup.Group
for _, proofType := range proofTypes {
wg.Go(func() error {
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
if err != nil {
return fmt.Errorf("generate exec proof: %w", err)
}
if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
return fmt.Errorf("broadcast exec proof: %w", err)
}
return nil
})
}
if err := wg.Wait(); err != nil {
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
}
}
// processSidecarsFromExecutionFromBlock retrieves sidecar data (if available) from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) error {
if roBlock.Version() >= version.Fulu {
if err := s.processDataColumnSidecarsFromExecution(ctx, peerdas.PopulateFromBlock(roBlock)); err != nil {
log.WithError(err).Error("Failed to process data column sidecars from execution")
return
return errors.Wrap(err, "process data column sidecars from execution")
}
return
return nil
}
if roBlock.Version() >= version.Deneb {
s.processBlobSidecarsFromExecution(ctx, roBlock)
return
return nil
}
return nil
}
// processBlobSidecarsFromExecution retrieves (if available) blob sidecars data from the execution client,
@@ -207,7 +182,6 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
key := fmt.Sprintf("%#x", source.Root())
if _, err, _ := s.columnSidecarsExecSingleFlight.Do(key, func() (any, error) {
const delay = 250 * time.Millisecond
secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
commitments, err := source.Commitments()
if err != nil {
@@ -225,9 +199,6 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, errors.Wrap(err, "column indices to sample")
}
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
@@ -248,6 +219,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, nil
}
// Return if the context is done.
if ctx.Err() != nil {
return nil, ctx.Err()
}
if iteration == 0 {
dataColumnsRecoveredFromELAttempts.Inc()
}
@@ -259,20 +235,10 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
}
// No sidecars are retrieved from the EL, retry later
constructedSidecarCount = uint64(len(constructedSidecars))
if constructedSidecarCount == 0 {
if ctx.Err() != nil {
return nil, ctx.Err()
}
time.Sleep(delay)
continue
}
dataColumnsRecoveredFromELTotal.Inc()
constructedCount := uint64(len(constructedSidecars))
// Boundary check.
if constructedSidecarCount != fieldparams.NumberOfColumns {
if constructedSidecarCount > 0 && constructedSidecarCount != fieldparams.NumberOfColumns {
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
}
@@ -281,14 +247,24 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
}
log.WithFields(logrus.Fields{
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).Debug("Constructed data column sidecars from the execution client")
if constructedCount > 0 {
dataColumnsRecoveredFromELTotal.Inc()
dataColumnSidecarsObtainedViaELCount.Observe(float64(len(unseenIndices)))
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
"proposerIndex": source.ProposerIndex(),
"iteration": iteration,
"type": source.Type(),
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).Debug("Constructed data column sidecars from the execution client")
return nil, nil
return nil, nil
}
// Wait before retrying.
time.Sleep(delay)
}
}); err != nil {
return err
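The reworked loop above now retries until the sidecars are constructed, checking the context at the top of each iteration and sleeping 250 ms between empty attempts, instead of racing a half-slot timeout. A self-contained sketch of that retry shape (pollUntilDone, fetch, and the timings are illustrative stand-ins, not code from this repository):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// pollUntilDone retries fetch every delay until it reports progress or the
// parent context ends. It mirrors the retry shape of the loop above; fetch is
// a hypothetical stand-in for the execution-client call.
func pollUntilDone(ctx context.Context, delay time.Duration, fetch func() (int, error)) error {
    for iteration := 0; ; iteration++ {
        // Give up only once the parent context is done.
        if ctx.Err() != nil {
            return ctx.Err()
        }
        count, err := fetch()
        if err != nil {
            return err
        }
        if count > 0 {
            fmt.Printf("iteration %d: constructed %d sidecars\n", iteration, count)
            return nil
        }
        // Nothing retrieved from the EL yet: wait before retrying.
        time.Sleep(delay)
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    attempts := 0
    err := pollUntilDone(ctx, 250*time.Millisecond, func() (int, error) {
        attempts++
        if attempts < 3 {
            return 0, nil // the EL does not have the blobs yet
        }
        return 128, nil
    })
    if err != nil && !errors.Is(err, context.DeadlineExceeded) {
        panic(err)
    }
}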
@@ -323,6 +299,11 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
unseenIndices[sidecar.Index] = true
}
// Exit early if there is nothing to broadcast or receive.
if len(unseenSidecars) == 0 {
return nil, nil
}
// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
return nil, errors.Wrap(err, "broadcast data column sidecars")

View File

@@ -194,7 +194,8 @@ func TestProcessSidecarsFromExecutionFromBlock(t *testing.T) {
},
seenBlobCache: lruwrpr.New(1),
}
s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
err := s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
require.NoError(t, err)
require.Equal(t, tt.expectedBlobCount, len(chainService.Blobs))
})
}
@@ -293,7 +294,8 @@ func TestProcessSidecarsFromExecutionFromBlock(t *testing.T) {
roBlock, err := blocks.NewROBlock(sb)
require.NoError(t, err)
s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
err = s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
require.NoError(t, err)
require.Equal(t, tt.expectedDataColumnCount, len(chainService.DataColumns))
})
}

View File

@@ -25,12 +25,12 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column sidecar")
return wrapDataColumnError(sidecar, "receive data column sidecar", err)
}
wg.Go(func() error {
if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
return errors.Wrap(err, "process data column sidecars from reconstruction")
return wrapDataColumnError(sidecar, "process data column sidecars from reconstruction", err)
}
return nil
@@ -38,7 +38,7 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
wg.Go(func() error {
if err := s.processDataColumnSidecarsFromExecution(ctx, peerdas.PopulateFromSidecar(sidecar)); err != nil {
return errors.Wrap(err, "process data column sidecars from execution")
return wrapDataColumnError(sidecar, "process data column sidecars from execution", err)
}
return nil
@@ -110,3 +110,7 @@ func (s *Service) allDataColumnSubnets(_ primitives.Slot) map[uint64]bool {
return allSubnets
}
func wrapDataColumnError(sidecar blocks.VerifiedRODataColumn, message string, err error) error {
return fmt.Errorf("%s - slot %d, root %#x: %w", message, sidecar.SignedBlockHeader.Header.Slot, sidecar.BlockRoot(), err)
}
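Because the helper wraps with %w, the original cause stays matchable via errors.Is/errors.As while the message gains the sidecar's slot and root. An illustrative, self-contained example of the resulting format (slot and root are inlined literals here):

package main

import (
    "context"
    "errors"
    "fmt"
)

func main() {
    // Illustrative only: the same format string as wrapDataColumnError.
    cause := context.Canceled
    root := []byte{0xab, 0xcd, 0xef, 0x01}
    err := fmt.Errorf("%s - slot %d, root %#x: %w",
        "receive data column sidecar", 123, root, cause)
    fmt.Println(err)
    // receive data column sidecar - slot 123, root 0xabcdef01: context canceled
    fmt.Println(errors.Is(err, context.Canceled)) // true: %w keeps the cause matchable
}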

View File

@@ -1,31 +0,0 @@
package sync
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
executionProof, ok := msg.(*ethpb.ExecutionProof)
if !ok {
return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.ExecutionProof{}, msg)
}
// Insert the execution proof into the pool
s.cfg.execProofPool.Insert(executionProof)
// Notify subscribers about the new execution proof
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.ExecutionProofReceived,
Data: &opfeed.ExecutionProofReceivedData{
ExecutionProof: executionProof,
},
})
return nil
}

View File

@@ -3,12 +3,11 @@ package sync
import (
"context"
"fmt"
"slices"
"math"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -193,13 +192,19 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)
select {
case s.dataColumnLogCh <- dataColumnLogEntry{
slot: roDataColumn.Slot(),
index: roDataColumn.Index,
root: roDataColumn.BlockRoot(),
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
Slot: roDataColumn.Slot(),
ColIdx: roDataColumn.Index,
PropIdx: roDataColumn.ProposerIndex(),
BlockRoot: roDataColumn.BlockRoot(),
ParentRoot: roDataColumn.ParentRoot(),
PeerSuffix: pid.String()[len(pid.String())-6:],
PeerGossipScore: peerGossipScore,
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
}:
default:
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
@@ -244,69 +249,68 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
}
type dataColumnLogEntry struct {
slot primitives.Slot
index uint64
root [32]byte
validationTime time.Duration
sinceStartTime time.Duration
Slot primitives.Slot
ColIdx uint64
PropIdx primitives.ValidatorIndex
BlockRoot [32]byte
ParentRoot [32]byte
PeerSuffix string
PeerGossipScore float64
validationTime time.Duration
sinceStartTime time.Duration
}
func (s *Service) processDataColumnLogs() {
ticker := time.NewTicker(1 * time.Second)
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
defer ticker.Stop()
slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
for {
select {
case col := <-s.dataColumnLogCh:
cols := slotStats[col.root]
cols = append(cols, col)
slotStats[col.root] = cols
case entry := <-s.dataColumnLogCh:
cols := slotStats[entry.Slot]
cols[entry.ColIdx] = entry
slotStats[entry.Slot] = cols
case <-ticker.C:
for root, columns := range slotStats {
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)
for slot, columns := range slotStats {
var (
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
peers = make([]string, 0, fieldparams.NumberOfColumns)
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
)
totalReceived := 0
for _, column := range columns {
indices = append(indices, column.index)
sumValidationTime += column.validationTime
sumSinceStartTime += column.sinceStartTime
if totalReceived == 0 {
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
totalReceived++
for _, entry := range columns {
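// A zero-value entry (empty PeerSuffix) marks a column index never received this slot.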
if entry.PeerSuffix == "" {
continue
}
minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
colIndices = append(colIndices, entry.ColIdx)
peers = append(peers, entry.PeerSuffix)
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))
totalReceived++
}
if totalReceived > 0 {
slices.Sort(indices)
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)
log.WithFields(logrus.Fields{
"slot": columns[0].slot,
"root": fmt.Sprintf("%#x", root),
"count": totalReceived,
"indices": helpers.PrettySlice(indices),
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
}).Debug("Accepted data column sidecars summary")
}
log.WithFields(logrus.Fields{
"slot": slot,
"receivedCount": totalReceived,
"columnIndices": colIndices,
"peers": peers,
"gossipScores": gossipScores,
"validationTimes": validationTimes,
"sinceStartTimes": sinceStartTimes,
}).Debug("Accepted data column sidecars summary")
}
slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
}
}
}
func prettyMinMaxAverage(min, max, average time.Duration) string {
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
}
func roundFloat(f float64, decimals int) float64 {
mult := math.Pow(10, float64(decimals))
return math.Round(f*mult) / mult
}
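A quick worked example of the new rounding helper (self-contained; roundFloat is copied verbatim from above):

package main

import (
    "fmt"
    "math"
)

// Copied from above so the example runs on its own.
func roundFloat(f float64, decimals int) float64 {
    mult := math.Pow(10, float64(decimals))
    return math.Round(f*mult) / mult
}

func main() {
    fmt.Println(roundFloat(3.14159, 2)) // 3.14
    fmt.Println(roundFloat(-0.125, 2))  // -0.13: math.Round rounds half away from zero
}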

View File

@@ -1,132 +0,0 @@
package sync
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
)
func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
// Always accept our own messages.
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
// Ignore messages during initial sync.
if s.cfg.initialSync.Syncing() {
return pubsub.ValidationIgnore, nil
}
// Reject messages with a nil topic.
if msg.Topic == nil {
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}
// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
return pubsub.ValidationReject, err
}
// Reject messages that are not of the expected type.
executionProof, ok := m.(*ethpb.ExecutionProof)
if !ok {
log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
return pubsub.ValidationReject, errWrongMessage
}
// 1. Verify proof is not from the future
if err := s.proofNotFromFutureSlot(executionProof); err != nil {
return pubsub.ValidationReject, err
}
// 2. Verify proof slot is greater than finalized slot
if err := s.proofAboveFinalizedSlot(ctx, executionProof); err != nil {
return pubsub.ValidationReject, err
}
// 3. Check if the proof is already in the DA checker cache (execution proof pool)
// If it exists in the cache, we know it has already passed validation.
blockRoot := bytesutil.ToBytes32(executionProof.BlockRoot)
if s.isProofCachedInPool(blockRoot, executionProof.ProofId) {
return pubsub.ValidationIgnore, nil
}
// 4. Verify proof size limits
if uint64(len(executionProof.ProofData)) > params.BeaconConfig().MaxProofDataBytes {
return pubsub.ValidationReject, fmt.Errorf("execution proof data size %d exceeds maximum allowed %d", len(executionProof.ProofData), params.BeaconConfig().MaxProofDataBytes)
}
// 5. Run zkVM proof verification
if err := s.verifyExecutionProof(executionProof); err != nil {
return pubsub.ValidationReject, err
}
// Validation successful, return accept
return pubsub.ValidationAccept, nil
}
// TODO: Do we need encapsulation for all those verification functions?
// proofNotFromFutureSlot checks whether the execution proof is from a future slot.
func (s *Service) proofNotFromFutureSlot(executionProof *ethpb.ExecutionProof) error {
currentSlot := s.cfg.clock.CurrentSlot()
proofSlot := executionProof.Slot
if currentSlot == proofSlot {
return nil
}
earliestStart, err := s.cfg.clock.SlotStart(proofSlot)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to compute start time for proof slot %d: %w", proofSlot, err)
}
earliestStart = earliestStart.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
// If the system time is still before earliestStart, we consider the proof from a future slot and return an error.
if s.cfg.clock.Now().Before(earliestStart) {
return fmt.Errorf("slot %d is too far in the future (current slot: %d)", proofSlot, currentSlot)
}
return nil
}
// proofAboveFinalizedSlot checks whether the execution proof's slot is after the finalized slot.
func (s *Service) proofAboveFinalizedSlot(ctx context.Context, executionProof *ethpb.ExecutionProof) error {
finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to get finalized checkpoint: %w", err)
}
fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to compute start slot for finalized epoch %d: %w", finalizedCheckpoint.Epoch, err)
}
if executionProof.Slot <= fSlot {
return fmt.Errorf("execution proof slot %d is not after finalized slot %d", executionProof.Slot, fSlot)
}
return nil
}
// isProofCachedInPool checks if the execution proof is already present in the pool.
func (s *Service) isProofCachedInPool(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
return s.cfg.execProofPool.Exists(blockRoot, proofId)
}
// verifyExecutionProof performs the actual verification of the execution proof.
func (s *Service) verifyExecutionProof(_ *ethpb.ExecutionProof) error {
// For now, treat all proofs as valid.
return nil
}

View File

@@ -1,408 +0,0 @@
package sync
import (
"bytes"
"context"
"fmt"
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
testingdb "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
)
func TestValidateExecutionProof(t *testing.T) {
beaconDB := testingdb.SetupDB(t)
p2pService := mockp2p.NewTestP2P(t)
ctx := context.Background()
fcp := &ethpb.Checkpoint{
Epoch: 1,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Root: params.BeaconConfig().ZeroHash[:],
Slot: 0,
}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, fcp))
defaultTopic := p2p.ExecutionProofSubnetTopicFormat + "/" + encoder.ProtocolSuffixSSZSnappy
fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E}
chainService := &mock.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
FinalizedCheckPoint: fcp,
}
currentSlot := primitives.Slot(100)
genesisTime := time.Now().Add(-time.Duration(uint64(currentSlot)*params.BeaconConfig().SecondsPerSlot) * time.Second)
tests := []struct {
name string
setupService func() *Service
proof *ethpb.ExecutionProof
topic *string
pid peer.ID
want pubsub.ValidationResult
wantErr bool
}{
{
name: "Ignore when syncing",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: true},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Reject nil topic",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: nil,
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof from future slot",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot + 1000, // Far future slot
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof below finalized slot",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: primitives.Slot(5), // Before finalized epoch 1
ProofId: 1,
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Ignore already seen proof",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Ignore proof already in pool",
setupService: func() *Service {
pool := execproofs.NewPool()
pool.Insert(&ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
})
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: pool,
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Reject proof if no verifier found",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof if verification fails",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Accept valid proof",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationAccept,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.setupService()
// Create pubsub message
buf := new(bytes.Buffer)
_, err := p2pService.Encoding().EncodeGossip(buf, tt.proof)
require.NoError(t, err)
msg := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
Topic: tt.topic,
},
}
// Validate
result, err := s.validateExecutionProof(ctx, tt.pid, msg)
if tt.wantErr {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, tt.want, result)
// If validation accepted, check that ValidatorData is set
if result == pubsub.ValidationAccept {
assert.NotNil(t, msg.ValidatorData)
validatedProof, ok := msg.ValidatorData.(*ethpb.ExecutionProof)
assert.Equal(t, true, ok)
// Check that the validated proof matches the original
assert.Equal(t, tt.proof.ProofId, validatedProof.ProofId)
assert.Equal(t, tt.proof.Slot, validatedProof.Slot)
assert.DeepEqual(t, tt.proof.BlockRoot, validatedProof.BlockRoot)
assert.DeepEqual(t, tt.proof.BlockHash, validatedProof.BlockHash)
assert.DeepEqual(t, tt.proof.ProofData, validatedProof.ProofData)
}
})
}
}
type alwaysFailVerifier struct{}
func (v *alwaysFailVerifier) Verify(proof *ethpb.ExecutionProof) (bool, error) {
return false, nil
}
func (v *alwaysFailVerifier) GetProofId() primitives.ExecutionProofId {
return primitives.ExecutionProofId(1)
}

View File

@@ -687,12 +687,6 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
}}
}
func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return st, nil
}}
}
func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
return sbrForValOverrideWithT(nil, idx, val)
}

View File

@@ -11,10 +11,12 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -482,19 +484,88 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e
defer dv.recordResult(RequireSidecarProposerExpected, &err)
for _, dataColumn := range dv.dataColumns {
dataColumnSlot := dataColumn.Slot()
type slotParentRoot struct {
slot primitives.Slot
parentRoot [fieldparams.RootLength]byte
}
// Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "verifying state"))
targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)
var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
// Use cached values if available.
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
return root, nil
}
// Use proposer lookahead directly
idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(slot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}
// Compute the target root for the epoch.
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
}
// Store the target root in the cache.
targetRootBySlotParentRoot[slotParentRoot] = targetRoot
return targetRoot, nil
}
for _, dataColumn := range dv.dataColumns {
// Extract the slot of the data column.
dataColumnSlot := dataColumn.Slot()
// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()
// Compute the target root for the data column.
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "target root"))
}
// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}
// Create a checkpoint for the target root.
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}
// Try to extract the proposer index from the data column in the cache.
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)
if !cached {
parentRoot := dataColumn.ParentRoot()
// Ensure the expensive index computation is only performed once for
// concurrent requests for the same signature data.
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
}
idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
}
return idx, nil
})
if err != nil {
return err
}
var ok bool
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
}
}
if idx != dataColumn.ProposerIndex() {
@@ -555,3 +626,7 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {
return sha256.Sum256(unhashedKey), nil
}
func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
return string(root[:]) + fmt.Sprintf("%d", slot)
}
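A note on this key format: the root occupies a fixed 32-byte prefix, so appending the decimal slot keeps distinct (root, slot) pairs distinct, as `TestConcatRootSlot` further down demonstrates. A standalone re-implementation for illustration (not the Prysm code itself):

```go
package main

import "fmt"

// concatRootSlotKey mirrors the helper above: 32 raw root bytes followed by
// the slot rendered in decimal. The fixed-length prefix makes the mapping
// from (root, slot) to key injective.
func concatRootSlotKey(root [32]byte, slot uint64) string {
	return string(root[:]) + fmt.Sprintf("%d", slot)
}

func main() {
	key := concatRootSlotKey([32]byte{1, 2, 3}, 3210)
	fmt.Printf("len=%d suffix=%q\n", len(key), key[32:]) // len=36 suffix="3210"
}
```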

View File

@@ -2,6 +2,7 @@ package verification
import (
"reflect"
"sync"
"testing"
"time"
@@ -794,90 +795,87 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
blobCount = 1
)
ctx := t.Context()
parentRoot := [fieldparams.RootLength]byte{}
// Create a Fulu state to get the expected proposer from the lookahead.
fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
expectedProposer, err := fuluState.ProposerLookahead()
require.NoError(t, err)
expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])
// Generate data columns with the expected proposer index.
matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
// Generate data columns with wrong proposer index.
wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)
t.Run("Proposer matches", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
firstColumn := columns[0]
ctx := t.Context()
testCases := []struct {
name string
stateByRooter StateByRooter
proposerCache proposerCache
columns []blocks.RODataColumn
error string
}{
{
name: "Cached, matches",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
},
}
verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
})
t.Run("Proposer does not match", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
columns: columns,
},
{
name: "Cached, does not match",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
},
}
verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})
t.Run("State lookup failure", func(t *testing.T) {
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
initializer := Initializer{
shared: &sharedResources{
sr: sbrNotFound(t, columns[0].ParentRoot()),
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{},
columns: columns,
error: errSidecarUnexpectedProposer.Error(),
},
{
name: "Not cached, state lookup failure",
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
},
}
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, "verifying state", err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})
}
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
blobs := make([]kzg.Blob, 0, len(roBlobs))
for i := range roBlobs {
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
columns: columns,
error: "verifying state",
},
}
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: tc.stateByRooter,
pc: tc.proposerCache,
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
},
}
return roDataColumnSidecars
verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
var wg sync.WaitGroup
var err1, err2 error
wg.Go(func() {
err1 = verifier.SidecarProposerExpected(ctx)
})
wg.Go(func() {
err2 = verifier.SidecarProposerExpected(ctx)
})
wg.Wait()
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
if len(tc.error) > 0 {
require.ErrorContains(t, tc.error, err1)
require.ErrorContains(t, tc.error, err2)
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
return
}
require.NoError(t, err1)
require.NoError(t, err2)
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
})
}
}
func TestColumnRequirementSatisfaction(t *testing.T) {
@@ -924,3 +922,12 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
require.NoError(t, err)
}
func TestConcatRootSlot(t *testing.T) {
root := [fieldparams.RootLength]byte{1, 2, 3}
const slot = primitives.Slot(3210)
const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"
actual := concatRootSlot(root, slot)
require.Equal(t, expected, actual)
}

View File

@@ -1,3 +0,0 @@
### Added
- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.

View File

@@ -1,2 +0,0 @@
#### Fixed
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.

View File

@@ -1,3 +0,0 @@
### Changed
- Data column sidecars cache warmup: Process in parallel all sidecars for a given epoch.

View File

@@ -1,3 +0,0 @@
### Changed
- Summarize the DEBUG log for data column sidecars incoming via gossip.

View File

@@ -0,0 +1,2 @@
### Added
- `--disable-get-blobs-v2` flag.

View File

@@ -1,2 +0,0 @@
### Changed
- Use lookahead to validate data column sidecar proposer index.

View File

@@ -1,3 +0,0 @@
### Changed
- Notify the engine about forkchoice updates in the background.

View File

@@ -1,2 +0,0 @@
### Changed
- Use a separate context when updating the slot cache.

View File

@@ -1,3 +0,0 @@
### Fixed
- Do not process slots and copy states for next epoch proposers after Fulu.

View File

@@ -1,2 +0,0 @@
### Ignored
- Do not send FCU on block batches.

View File

@@ -1,3 +0,0 @@
### Changed
- Do not check block signature on state transition.

View File

@@ -1,3 +0,0 @@
### Added
- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint.

View File

@@ -1,3 +0,0 @@
### Changed
- Extend `httperror` analyzer to more functions.

View File

@@ -1,3 +0,0 @@
### Fixed
- Avoid panic when the fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175)

View File

@@ -1,3 +0,0 @@
### Changed
- Performance improvement in `ProcessConsolidationRequests`: use the more performant `HasPendingBalanceToWithdraw` instead of `PendingBalanceToWithdraw`, since computing the full total pending balance is unnecessary.

View File

@@ -20,7 +20,6 @@ go_library(
"//cmd:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

View File

@@ -356,12 +356,9 @@ var (
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
}
// ZKVM Generation Proof Type
ZkvmGenerationProofTypeFlag = &cli.IntSliceFlag{
Name: "zkvm-generation-proof-types",
Usage: `
Comma-separated list of proof type IDs to generate
(e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth).
Optional - nodes can verify proofs without generating them.`,
// DisableGetBlobsV2 disables the engine_getBlobsV2 usage.
DisableGetBlobsV2 = &cli.BoolFlag{
Name: "disable-get-blobs-v2",
Usage: "Disables the engine_getBlobsV2 usage.",
}
)

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v7/cmd"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
@@ -18,6 +17,7 @@ type GlobalFlags struct {
SubscribeToAllSubnets bool
Supernode bool
SemiSupernode bool
DisableGetBlobsV2 bool
MinimumSyncPeers int
MinimumPeersPerSubnet int
MaxConcurrentDials int
@@ -28,7 +28,6 @@ type GlobalFlags struct {
DataColumnBatchLimit int
DataColumnBatchLimitBurstFactor int
StateDiffExponents []int
ProofGenerationTypes []primitives.ExecutionProofId
}
var globalConfig *GlobalFlags
@@ -74,6 +73,11 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
cfg.SemiSupernode = true
}
if ctx.Bool(DisableGetBlobsV2.Name) {
log.Warning("Disabling `engine_getBlobsV2` API")
cfg.DisableGetBlobsV2 = true
}
// State-diff-exponents
cfg.StateDiffExponents = ctx.IntSlice(StateDiffExponents.Name)
if features.Get().EnableStateDiff {
@@ -86,19 +90,6 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
}
}
// zkVM Proof Generation Types
proofTypes := make([]primitives.ExecutionProofId, 0, len(ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name)))
for _, t := range ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name) {
proofTypes = append(proofTypes, primitives.ExecutionProofId(t))
}
cfg.ProofGenerationTypes = proofTypes
if features.Get().EnableZkvm {
if err := validateZkvmProofGenerationTypes(cfg.ProofGenerationTypes); err != nil {
return fmt.Errorf("validate Zkvm proof generation types: %w", err)
}
}
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
@@ -150,13 +141,3 @@ func validateStateDiffExponents(exponents []int) error {
}
return nil
}
// validateZkvmProofGenerationTypes validates the provided proof IDs.
func validateZkvmProofGenerationTypes(types []primitives.ExecutionProofId) error {
for _, t := range types {
if t >= primitives.EXECUTION_PROOF_TYPE_COUNT {
return fmt.Errorf("invalid zkvm proof generation type: %d; valid types are between 0 and %d", t, primitives.EXECUTION_PROOF_TYPE_COUNT-1)
}
}
return nil
}

View File

@@ -147,6 +147,7 @@ var appFlags = []cli.Flag{
flags.SlasherDirFlag,
flags.SlasherFlag,
flags.JwtId,
flags.DisableGetBlobsV2,
storage.BlobStoragePathFlag,
storage.DataColumnStoragePathFlag,
storage.BlobStorageLayout,
@@ -156,7 +157,6 @@ var appFlags = []cli.Flag{
dasFlags.BackfillOldestSlot,
dasFlags.BlobRetentionEpochFlag,
flags.BatchVerifierLimit,
flags.ZkvmGenerationProofTypeFlag,
}
func init() {

View File

@@ -169,6 +169,7 @@ var appHelpFlagGroups = []flagGroup{
flags.ExecutionJWTSecretFlag,
flags.JwtId,
flags.InteropMockEth1DataVotesFlag,
flags.DisableGetBlobsV2,
},
},
{ // Flags relevant to configuring beacon chain monitoring.
@@ -231,12 +232,6 @@ var appHelpFlagGroups = []flagGroup{
flags.SetGCPercent,
},
},
{
Name: "zkvm",
Flags: []cli.Flag{
flags.ZkvmGenerationProofTypeFlag,
},
},
}
func init() {

View File

@@ -52,7 +52,6 @@ type Flags struct {
DisableDutiesV2 bool // DisableDutiesV2 sets validator client to use the get Duties endpoint
EnableWeb bool // EnableWeb enables the webui on the validator client
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
EnableZkvm bool // EnableZkvm enables zkVM related features.
// Logging related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
@@ -299,11 +298,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
}
}
if ctx.IsSet(EnableZkvmFlag.Name) {
logEnabled(EnableZkvmFlag)
cfg.EnableZkvm = true
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil

View File

@@ -211,17 +211,6 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
// Activate ZKVM execution proof mode
EnableZkvmFlag = &cli.BoolFlag{
Name: "activate-zkvm",
Usage: `
Activates ZKVM execution proof mode. Enables the node to subscribe to the
execution_proof gossip topic, receive and verify execution proofs from peers,
and advertise zkVM support in its ENR for peer discovery.
Use --zkvm-generation-proof-types to specify which proof types this node
should generate (optional - nodes can verify without generating).
`,
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -283,7 +272,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
EnableZkvmFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -310,11 +310,6 @@ type BeaconChainConfig struct {
// Blobs Values
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes uint64 `yaml:"MAX_PROOF_DATA_BYTES" spec:"true"` // MaxProofDataBytes is the maximum number of bytes for execution proof data.
MinProofsRequired uint64 `yaml:"MIN_PROOFS_REQUIRED" spec:"true"` // MinProofsRequired is the minimum number of execution proofs required for a block to be considered valid.
MinEpochsForExecutionProofRequests uint64 `yaml:"MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS" spec:"true"` // MinEpochsForExecutionProofRequests is the minimum number of epochs the node will keep the execution proofs for.
// Deprecated_MaxBlobsPerBlock defines the max blobs that could exist in a block.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlock int `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"`
@@ -737,20 +732,6 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}
// WithinExecutionProofPeriod checks if the given epoch is within the execution proof retention period.
// This is used to determine whether execution proofs should be requested or generated for blocks at the given epoch.
// Returns true if the epoch is at or after the retention boundary (Fulu fork epoch or proof retention epoch).
func WithinExecutionProofPeriod(epoch, current primitives.Epoch) bool {
proofRetentionEpoch := primitives.Epoch(0)
if current >= primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests) {
proofRetentionEpoch = current - primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests)
}
boundaryEpoch := primitives.MaxEpoch(BeaconConfig().FuluForkEpoch, proofRetentionEpoch)
return epoch >= boundaryEpoch
}
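A worked example of the retention arithmetic this removal drops, with assumed values (a 2-epoch retention window matching the mainnet config below, and a hypothetical Fulu fork epoch of 5):

```go
package main

import "fmt"

// withinExecutionProofPeriod reproduces the removed logic with plain uint64s.
func withinExecutionProofPeriod(epoch, current, minEpochs, fuluForkEpoch uint64) bool {
	proofRetentionEpoch := uint64(0)
	if current >= minEpochs {
		proofRetentionEpoch = current - minEpochs
	}
	boundary := fuluForkEpoch
	if proofRetentionEpoch > boundary {
		boundary = proofRetentionEpoch
	}
	return epoch >= boundary
}

func main() {
	// With current = 10 and a 2-epoch retention window, the retention epoch is
	// 8; since 8 > the assumed Fulu fork epoch 5, the boundary is 8.
	for _, epoch := range []uint64{5, 7, 8, 10} {
		fmt.Printf("epoch %d within period: %v\n", epoch, withinExecutionProofPeriod(epoch, 10, 2, 5))
	}
}
```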
// EpochsDuration returns the time duration of the given number of epochs.
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
return SlotsDuration(SlotsForEpochs(count, b), b)

View File

@@ -38,7 +38,6 @@ var mainnetNetworkConfig = &NetworkConfig{
AttSubnetKey: "attnets",
SyncCommsSubnetKey: "syncnets",
CustodyGroupCountKey: "cgc",
ZkvmEnabledKey: "zkvm",
MinimumPeersInSubnetSearch: 20,
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
BootstrapNodes: []string{
@@ -356,11 +355,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxBlobsPerBlock: 21,
},
},
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes: 1_048_576, // 1 MiB
MinProofsRequired: 2,
MinEpochsForExecutionProofRequests: 2,
}
// MainnetTestConfig provides a version of the mainnet config that has a different name

View File

@@ -11,7 +11,6 @@ type NetworkConfig struct {
AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield.
SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield.
CustodyGroupCountKey string // CustodyGroupsCountKey is the ENR key of the custody group count.
ZkvmEnabledKey string // ZkvmEnabledKey is the ENR key of whether zkVM mode is enabled or not.
MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search.
// Chain Network Config

View File

@@ -4,14 +4,12 @@ go_library(
name = "go_default_library",
srcs = [
"basis_points.go",
"builder_index.go",
"committee_bits_mainnet.go",
"committee_bits_minimal.go", # keep
"committee_index.go",
"domain.go",
"epoch.go",
"execution_address.go",
"execution_proof_id.go",
"kzg.go",
"payload_id.go",
"slot.go",
@@ -33,11 +31,9 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"builder_index_test.go",
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",
"execution_proof_id_test.go",
"slot_test.go",
"sszbytes_test.go",
"sszuint64_test.go",

View File

@@ -1,54 +0,0 @@
package primitives
import (
"fmt"
fssz "github.com/prysmaticlabs/fastssz"
)
var _ fssz.HashRoot = (BuilderIndex)(0)
var _ fssz.Marshaler = (*BuilderIndex)(nil)
var _ fssz.Unmarshaler = (*BuilderIndex)(nil)
// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64
// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith appends the SSZ uint64 representation of the index to the given hasher.
func (b BuilderIndex) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint64(uint64(b))
return nil
}
// UnmarshalSSZ decodes the SSZ-encoded uint64 index from buf.
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
if len(buf) != b.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", b.SizeSSZ(), len(buf))
}
*b = BuilderIndex(fssz.UnmarshallUint64(buf))
return nil
}
// MarshalSSZTo appends the SSZ-encoded index to dst and returns the extended buffer.
func (b *BuilderIndex) MarshalSSZTo(dst []byte) ([]byte, error) {
marshalled, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalled...), nil
}
// MarshalSSZ encodes the index as an SSZ uint64.
func (b *BuilderIndex) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint64([]byte{}, uint64(*b))
return marshalled, nil
}
// SizeSSZ returns the size of the SSZ-encoded index in bytes.
func (b *BuilderIndex) SizeSSZ() int {
return 8
}

View File

@@ -1,86 +0,0 @@
package primitives_test
import (
"encoding/binary"
"slices"
"strconv"
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestBuilderIndex_SSZRoundTripAndHashRoot(t *testing.T) {
cases := []uint64{
0,
1,
42,
(1 << 32) - 1,
1 << 32,
^uint64(0),
}
for _, v := range cases {
t.Run("v="+u64name(v), func(t *testing.T) {
t.Parallel()
val := primitives.BuilderIndex(v)
require.Equal(t, 8, (&val).SizeSSZ())
enc, err := (&val).MarshalSSZ()
require.NoError(t, err)
require.Equal(t, 8, len(enc))
wantEnc := make([]byte, 8)
binary.LittleEndian.PutUint64(wantEnc, v)
require.DeepEqual(t, wantEnc, enc)
dstPrefix := []byte("prefix:")
dst, err := (&val).MarshalSSZTo(slices.Clone(dstPrefix))
require.NoError(t, err)
wantDst := append(dstPrefix, wantEnc...)
require.DeepEqual(t, wantDst, dst)
var decoded primitives.BuilderIndex
require.NoError(t, (&decoded).UnmarshalSSZ(enc))
require.Equal(t, val, decoded)
root, err := val.HashTreeRoot()
require.NoError(t, err)
var wantRoot [32]byte
binary.LittleEndian.PutUint64(wantRoot[:8], v)
require.Equal(t, wantRoot, root)
})
}
}
func TestBuilderIndex_UnmarshalSSZRejectsWrongSize(t *testing.T) {
for _, size := range []int{7, 9} {
t.Run("size="+strconv.Itoa(size), func(t *testing.T) {
t.Parallel()
var v primitives.BuilderIndex
err := (&v).UnmarshalSSZ(make([]byte, size))
require.ErrorContains(t, "expected buffer of length 8", err)
})
}
}
func u64name(v uint64) string {
switch v {
case 0:
return "0"
case 1:
return "1"
case 42:
return "42"
case (1 << 32) - 1:
return "2^32-1"
case 1 << 32:
return "2^32"
case ^uint64(0):
return "max"
default:
return "custom"
}
}

View File

@@ -1,64 +0,0 @@
package primitives
import (
"fmt"
fssz "github.com/prysmaticlabs/fastssz"
)
var _ fssz.HashRoot = (ExecutionProofId)(0)
var _ fssz.Marshaler = (*ExecutionProofId)(nil)
var _ fssz.Unmarshaler = (*ExecutionProofId)(nil)
// Number of execution proof types.
// Each proof type represents a different zkVM+EL combination.
//
// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
const EXECUTION_PROOF_TYPE_COUNT = 8
// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
type ExecutionProofId uint8
func (id *ExecutionProofId) IsValid() bool {
return uint8(*id) < EXECUTION_PROOF_TYPE_COUNT
}
// HashTreeRoot --
func (id ExecutionProofId) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(id)
}
// HashTreeRootWith --
func (id ExecutionProofId) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint8(uint8(id))
return nil
}
// UnmarshalSSZ --
func (id *ExecutionProofId) UnmarshalSSZ(buf []byte) error {
if len(buf) != id.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", id.SizeSSZ(), len(buf))
}
*id = ExecutionProofId(fssz.UnmarshallUint8(buf))
return nil
}
// MarshalSSZTo --
func (id *ExecutionProofId) MarshalSSZTo(buf []byte) ([]byte, error) {
marshalled, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
return append(buf, marshalled...), nil
}
// MarshalSSZ --
func (id *ExecutionProofId) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint8([]byte{}, uint8(*id))
return marshalled, nil
}
// SizeSSZ --
func (id *ExecutionProofId) SizeSSZ() int {
return 1
}

View File

@@ -1,73 +0,0 @@
package primitives_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
func TestExecutionProofId_IsValid(t *testing.T) {
tests := []struct {
name string
id primitives.ExecutionProofId
valid bool
}{
{
name: "valid proof id 0",
id: 0,
valid: true,
},
{
name: "valid proof id 1",
id: 1,
valid: true,
},
{
name: "valid proof id 7 (max valid)",
id: 7,
valid: true,
},
{
name: "invalid proof id 8 (at limit)",
id: 8,
valid: false,
},
{
name: "invalid proof id 255",
id: 255,
valid: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.id.IsValid(); got != tt.valid {
t.Errorf("ExecutionProofId.IsValid() = %v, want %v", got, tt.valid)
}
})
}
}
func TestExecutionProofId_Casting(t *testing.T) {
id := primitives.ExecutionProofId(5)
t.Run("uint8", func(t *testing.T) {
if uint8(id) != 5 {
t.Errorf("Casting to uint8 failed: got %v, want 5", uint8(id))
}
})
t.Run("from uint8", func(t *testing.T) {
var x uint8 = 7
if primitives.ExecutionProofId(x) != 7 {
t.Errorf("Casting from uint8 failed: got %v, want 7", primitives.ExecutionProofId(x))
}
})
t.Run("int", func(t *testing.T) {
var x = 3
if primitives.ExecutionProofId(x) != 3 {
t.Errorf("Casting from int failed: got %v, want 3", primitives.ExecutionProofId(x))
}
})
}

View File

@@ -1,72 +0,0 @@
# Kurtosis scripts for EIP-8025
## How to run
I slightly modified [Manu's tip](https://hackmd.io/8z4thpsyQJioaU6jj0Wazw) by adding the following to my `~/.zshrc`.
```zsh
# Kurtosis Aliases
blog() {
docker logs -f "$(docker ps | grep cl-"$1"-prysm-geth | awk '{print $NF}')" 2>&1
}
vlog() {
docker logs -f "$(docker ps | grep vc-"$1"-geth-prysm | awk '{print $NF}')" 2>&1
}
dora() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/dora/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
graf() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/grafana/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
devnet () {
local args_file_path="./kurtosis/default.yaml"
if [ ! -z "$1" ]; then
args_file_path="$1"
echo "Using custom args-file path: $args_file_path"
else
echo "Using default args-file path: $args_file_path"
fi
kurtosis clean -a &&
bazel build //cmd/beacon-chain:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/beacon-chain/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/beacon-chain prysm-bn-custom-image &&
bazel build //cmd/validator:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/validator/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/validator prysm-vc-custom-image &&
kurtosis run github.com/ethpandaops/ethereum-package --args-file="$args_file_path" --verbosity brief &&
dora
}
stop() {
kurtosis clean -a
}
dps() {
docker ps --format "table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}" -a
}
```
From the project directory, you can spin up a devnet with:
```bash
$ devnet
```
Or you can specify the network parameter YAML file like:
```bash
$ devnet ./kurtosis/proof_verify.yaml
```
### Running scripts with local images
Prysm images are built and loaded automatically by the `devnet` command, but to run a script with `lighthouse`:
#### `./kurtosis/interop.yaml`
- `lighthouse:local`: Please build your own image following [Lighthouse's guide](https://lighthouse-book.sigmaprime.io/installation_docker.html?highlight=docker#building-the-docker-image) on the [`kevaundray/kw/sel-alternative`](https://github.com/kevaundray/lighthouse/tree/kw/sel-alternative/) branch.

View File

@@ -1,16 +0,0 @@
participants:
- el_type: geth
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 4
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

View File

@@ -1,38 +0,0 @@
# 3 nodes (2 from Prysm, 1 from Lighthouse) generate proofs and
# 1 node only verifies
participants:
# Prysm: Proof generating nodes (nodes 1-2)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 2
# Lighthouse: Proof generating nodes (node 3)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: lighthouse
cl_image: lighthouse:local
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
- --target-peers=3
count: 1
# Prysm: Proof verifying only node (node 4)
- el_type: dummy
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
vc_image: prysm-vc-custom-image
count: 1
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

View File

@@ -1,27 +0,0 @@
# 3 nodes generate proofs, 1 node only verifies
participants:
# Proof generating nodes (nodes 1-3)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 3
# Proof verifying only node (node 4)
- el_type: dummy
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
vc_image: prysm-vc-custom-image
count: 1
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

Some files were not shown because too many files have changed in this diff.