Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-22 11:48:13 -05:00)

Compare commits: bazel_mirr...poc/option
72 commits: 66f63aee9c, 698b6922f0, ca228fca44, 0db74365e0, 6f90101364, 49e1763ec2, c2527c82cd, d4ea8fafd6, 07d1d6bdf9, f938da99d9, 9deec69cc7, 2767f08f4d, 4d6663b4de, e713560a68, 4571e50609, 175738919e, f1cbdc9fa6, 156383c9c8, 5ede7c8fe0, 3324c7b655, d477bcfa20, 38183471da, 3c3e2b42e9, d496f7bfab, 55e2663f82, 5f0afd09c6, 95fff68b11, d0bc0fcda8, 8b2acd5f47, fb071ebe20, a174d0cd53, 06655dcd1f, c1dcf97c0c, f596223096, a184afdfb4, 056843bcae, a587a9dd6e, dde9dc3dd9, 960d666801, 1468c20c54, 68d8988121, 9ca5bf0119, bf8f494792, cab25267b5, b9c23dae89, 7944731ccf, 4d2a61a2e0, 8708c198c9, 2857eeae6e, 4912c29d06, d520158510, c13d61a959, f5c61ebaea, ae3d465615, f23210853d, 6dc49b41f2, e56550af48, 20f617ecc9, adb1de9caa, 2d9e6ad2c8, e8eb022145, 38be9400f1, b01e760e0a, da4a8f1dd3, 0dca170953, cd549abbfa, 28a661518e, 4ab5888c4c, 0d818bc687, 0e90a0f2d8, 2de069d543, 50e88045bb
@@ -48,6 +48,7 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -65,6 +66,7 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -146,6 +148,8 @@ go_test(
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}

func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx

st, _ := util.DeterministicGenesisStateBellatrix(t, 1)

service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}
@@ -12,6 +12,7 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -53,58 +54,53 @@ type fcuConfig struct {
}

// sendFCU handles the logic to notify the engine of a forckhoice update
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions .
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}

// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
}

// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
log.WithError(err).Error("Could not notify forkchoice update")
}

if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}

// shouldOverrideFCU checks whether the incoming block is still subject to being
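To make the reworked sendFCU easier to follow: the new code only skips the engine call when the head did not change and there are no payload attributes to deliver, and it dispatches the update on a goroutine during regular sync. A minimal standalone sketch of that skip rule, using simplified stand-in types rather than Prysm's own (`attributes` and `shouldNotifyEngine` are hypothetical):

```go
package main

import "fmt"

// attributes stands in for the payload-attribute interface in the diff.
type attributes struct{ empty bool }

func (a *attributes) IsEmpty() bool { return a == nil || a.empty }

// shouldNotifyEngine mirrors the new skip rule: with no new head and no
// payload attributes there is nothing for the execution engine to do.
func shouldNotifyEngine(headChanged bool, attrs *attributes) bool {
	return headChanged || !attrs.IsEmpty()
}

func main() {
	fmt.Println(shouldNotifyEngine(false, nil))                       // false: skip FCU entirely
	fmt.Println(shouldNotifyEngine(true, nil))                        // true: new head, notify engine
	fmt.Println(shouldNotifyEngine(false, &attributes{empty: false})) // true: proposer needs attributes
}
```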
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)

payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
service.forkchoiceUpdateWithExecution(ctx, args)
}

func TestShouldOverrideFCU(t *testing.T) {
@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -13,6 +14,7 @@ import (
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -136,6 +138,14 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
}

// WithExecProofsPool to keep track of execution proofs.
func WithExecProofsPool(p execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.ExecProofsPool = p
return nil
}
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Accessor) Option {
return func(s *Service) error {
@@ -266,3 +276,10 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}

func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}
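The two new options, WithExecProofsPool and WithOperationNotifier, follow the package's existing functional-option pattern. A self-contained sketch of that pattern with simplified stand-in types (plain strings here instead of the real execproofs.PoolManager and operation.Notifier):

```go
package main

import "fmt"

type config struct {
	execProofsPool    string // stands in for execproofs.PoolManager
	operationNotifier string // stands in for operation.Notifier
}

type Service struct{ cfg config }

// Option mutates the service during construction and may fail.
type Option func(*Service) error

func WithExecProofsPool(p string) Option {
	return func(s *Service) error { s.cfg.execProofsPool = p; return nil }
}

func WithOperationNotifier(n string) Option {
	return func(s *Service) error { s.cfg.operationNotifier = n; return nil }
}

// NewService applies each option in order, stopping at the first error.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := NewService(WithExecProofsPool("pool"), WithOperationNotifier("notifier"))
	fmt.Printf("%+v\n", s.cfg)
}
```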
@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}
@@ -43,7 +45,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
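The substance of the getRecentPreState change is which epoch the dependent-root lookup uses: both the head and the target checkpoint are now resolved at headEpoch-1, since forkchoice may not be able to answer the smaller c.Epoch-1. A toy illustration of that compatibility check, with a map-based store standing in for forkchoice (not the real DependentRootForEpoch implementation):

```go
package main

import "fmt"

// store maps block root -> epoch -> dependent root.
type store map[string]map[uint64]string

func (s store) dependentRoot(root string, epoch uint64) string { return s[root][epoch] }

// headStateCompatible reports whether the head state can serve as a pre-state
// for the target checkpoint: both must share the dependent root for the epoch
// just before the head epoch.
func headStateCompatible(s store, headRoot, targetRoot string, headEpoch uint64) bool {
	return s.dependentRoot(headRoot, headEpoch-1) == s.dependentRoot(targetRoot, headEpoch-1)
}

func main() {
	s := store{
		"head":   {1: "0xabc"},
		"target": {1: "0xabc"},
		"fork":   {1: "0xdef"},
	}
	fmt.Println(headStateCompatible(s, "head", "target", 2)) // true: same dependent root
	fmt.Println(headStateCompatible(s, "head", "fork", 2))   // false: different branch
}
```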
@@ -170,12 +170,13 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -197,12 +198,13 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -227,6 +229,7 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -235,8 +238,9 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {

service.head = &head{
root: [32]byte{'T'},
state: s,
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -263,6 +267,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -270,7 +275,8 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'T'},
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}
@@ -287,12 +293,13 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -7,6 +7,8 @@ import (

"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -15,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -66,9 +69,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -105,12 +105,16 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
}

return nil
@@ -295,14 +299,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

@@ -330,6 +326,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}

// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -359,7 +356,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -372,7 +371,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -666,10 +665,17 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}

root := roBlock.Root()
blockVersion := block.Version()
root, blockVersion := roBlock.Root(), roBlock.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
if err := s.areExecutionProofsAvailable(ctx, root); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}

if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}

return nil
}

if blockVersion >= version.Deneb {
@@ -679,6 +685,67 @@ func (s *Service) isDataAvailable(
return nil
}

// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if the generation proof is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
return nil
}

requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"requiredProofCount": requiredProofCount,
})

// Subscribe to execution proof received events.
eventsChan := make(chan *feed.Event, 1)
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
defer subscription.Unsubscribe()

// Return early if we already have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}

// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case event := <-eventsChan:
if event.Type != operation.ExecutionProofReceived {
continue
}

proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
if !ok {
log.Error("Could not cast event data to ExecutionProofReceivedData")
continue
}

proof := proofWrapper.ExecutionProof

// Skip if the proof is for a different block.
if bytesutil.ToBytes32(proof.BlockRoot) != blockRoot {
continue
}

// Return if we have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}

// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
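The core of areExecutionProofsAvailable is "check the pool, subscribe, then wait until the count reaches the threshold or the context expires". A self-contained sketch of that waiting pattern, with a toy atomic counter and a plain channel standing in for the execution-proofs pool and the operation feed:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// waitForProofs blocks until count() reaches required, an event arrives that
// pushes it over the threshold, or ctx is cancelled.
func waitForProofs(ctx context.Context, count func() int, required int, events <-chan struct{}) error {
	if count() >= required {
		return nil // already have enough proofs
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-events:
			if count() >= required {
				return nil
			}
		}
	}
}

func main() {
	events := make(chan struct{}, 4)
	var n atomic.Int64
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(10 * time.Millisecond)
			n.Add(1) // a new proof lands in the "pool"
			events <- struct{}{}
		}
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForProofs(ctx, func() int { return int(n.Load()) }, 3, events)) // <nil>
}
```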
@@ -912,8 +979,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -926,14 +991,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
}
// return early if we already started building a block for the current
// head root
@@ -963,6 +1046,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
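The post-Fulu branch of lateBlockTasks defers the cache refresh to a background goroutine bounded by a slot deadline, so the forkchoice update reaches the engine first. A minimal sketch of that scheduling pattern; the 4-second deadline and updateCaches below are illustrative stand-ins, not Prysm's actual values or helpers:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// updateCaches simulates the deferred cache work, bounded by ctx.
func updateCaches(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func lateBlockTasks(parent context.Context, done chan<- error) {
	const slotDeadline = 4 * time.Second // assumed deadline for the sketch
	// Deferred so it fires only after the (omitted) forkchoice update is sent,
	// then hands the work to a goroutine with its own timeout.
	defer func() {
		go func() {
			ctx, cancel := context.WithTimeout(parent, slotDeadline)
			defer cancel()
			done <- updateCaches(ctx)
		}()
	}()
	// ... send the forkchoice update to the engine here ...
}

func main() {
	done := make(chan error, 1)
	lateBlockTasks(context.Background(), done)
	fmt.Println(<-done) // <nil>
}
```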
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
log.WithError(err).Error("Could not update next slot state cache")
return
}
if !slots.IsEpochEnd(slot) {
return nil
return
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}

@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
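updateCachesPostBlockProcessing only performs epoch-boundary work when the processed slot closes an epoch. A toy illustration of that gate; slotsPerEpoch is fixed at 32 for the example, whereas Prysm reads it from its beacon config:

```go
package main

import "fmt"

const slotsPerEpoch = 32 // illustrative constant for the sketch

// isEpochEnd reports whether slot is the last slot of its epoch.
func isEpochEnd(slot uint64) bool { return (slot+1)%slotsPerEpoch == 0 }

// postBlockCacheUpdate runs the epoch-boundary callback only at epoch end.
func postBlockCacheUpdate(slot uint64, handleEpochBoundary func() error) error {
	if !isEpochEnd(slot) {
		return nil // nothing to do mid-epoch
	}
	return handleEpochBoundary()
}

func main() {
	err := postBlockCacheUpdate(63, func() error { fmt.Println("epoch boundary caches updated"); return nil })
	fmt.Println(err)
	_ = postBlockCacheUpdate(10, func() error { panic("not called mid-epoch") })
}
```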
@@ -13,6 +13,8 @@ import (
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -738,7 +740,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -788,7 +792,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -816,25 +822,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
signed := &consensusblocks.SignedBeaconBlock{}
|
||||
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
blk.Signature = []byte{'a'} // Mutate the signature.
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -866,7 +856,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1339,7 +1331,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1351,7 +1345,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1363,7 +1359,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1375,7 +1373,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1400,197 +1400,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
// INVALID from FCU, with LVH block 17. No head is viable. We check
|
||||
// that the node is optimistic and that we can actually import a block on top of
|
||||
// 17 and recover.
|
||||
func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SlotsPerEpoch = 6
|
||||
config.AltairForkEpoch = 1
|
||||
config.BellatrixForkEpoch = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
|
||||
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), jc.Epoch)
|
||||
|
||||
// import a block that justifies the second epoch
|
||||
driftGenesisTime(service, 18, 0)
|
||||
validHeadState, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
firstInvalidRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
|
||||
sjc := validHeadState.CurrentJustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 19, 0)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
|
||||
// store's headroot is the previous head (since the invalid block did
|
||||
// not finish importing) one and that the node is optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
headRoot, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// import another block based on the last valid head state
|
||||
mockEngine = &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 20, 0)
|
||||
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, jc.Epoch, sjc.Epoch)
|
||||
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
@@ -1642,7 +1451,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1662,8 +1473,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
@@ -1684,8 +1496,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1708,7 +1521,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1718,6 +1533,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isBlock18OptimisticAfterImport)
|
||||
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
|
||||
@@ -1768,7 +1587,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1835,7 +1656,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1856,8 +1679,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -1877,7 +1701,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1906,8 +1732,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1975,7 +1802,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -2000,7 +1829,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -2028,7 +1859,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -2072,7 +1905,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
t.Log(i)
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2089,7 +1921,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -2109,8 +1943,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -2130,7 +1965,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -2161,7 +1998,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2282,7 +2121,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2348,7 +2189,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2631,7 +2474,10 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2732,7 +2578,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2766,7 +2614,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -3152,6 +3003,113 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
|
||||
// Enable zkVM feature
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
// Set MinProofsRequired for testing
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.MinProofsRequired = 3
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Setup with sufficient data columns
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// Insert MinProofsRequired execution proofs into the pool
|
||||
for i := range cfg.MinProofsRequired {
|
||||
proof := ðpb.ExecutionProof{
|
||||
BlockRoot: root[:],
|
||||
Slot: signed.Block().Slot(),
|
||||
ProofId: primitives.ExecutionProofId(i),
|
||||
ProofData: []byte{byte(i)},
|
||||
}
|
||||
service.cfg.ExecProofsPool.Insert(proof)
|
||||
}
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
|
||||
// Enable zkVM feature
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
// Set MinProofsRequired for testing
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.MinProofsRequired = 3
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Setup with sufficient data columns
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{
|
||||
WithOperationNotifier(&mock.MockOperationNotifier{}),
|
||||
},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
|
||||
// Goroutine to send execution proofs after data columns are available
|
||||
go func() {
|
||||
// Wait a bit to simulate async proof arrival
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Send ExecutionProofReceived events
|
||||
opfeed := service.cfg.OperationNotifier.OperationFeed()
|
||||
for i := range cfg.MinProofsRequired {
|
||||
proof := ðpb.ExecutionProof{
|
||||
BlockRoot: root[:],
|
||||
Slot: signed.Block().Slot(),
|
||||
ProofId: primitives.ExecutionProofId(i),
|
||||
ProofData: []byte{byte(i)},
|
||||
}
|
||||
service.cfg.ExecProofsPool.Insert(proof)
|
||||
|
||||
opfeed.Send(&feed.Event{
|
||||
Type: operation.ExecutionProofReceived,
|
||||
Data: &operation.ExecutionProofReceivedData{
|
||||
ExecutionProof: proof,
|
||||
},
|
||||
})
|
||||
}
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Test_postBlockProcess_EventSending tests that block processed events are only sent
|
||||
@@ -3262,7 +3220,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -66,52 +66,54 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
|
||||
|
||||
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
go func() {
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
}
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to receive genesis data")
|
||||
return
|
||||
}
|
||||
|
||||
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
|
||||
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
if s.genesisTime.IsZero() {
|
||||
log.Warn("ProcessAttestations routine waiting for genesis time")
|
||||
for s.genesisTime.IsZero() {
|
||||
if err := s.ctx.Err(); err != nil {
|
||||
log.WithError(err).Error("Giving up waiting for genesis time")
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot)
|
||||
}
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}()
|
||||
log.Warn("Genesis time received, now available to process attestations")
|
||||
}
|
||||
|
||||
// Wait for node to be synced before running the routine.
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Could not wait to sync")
|
||||
return
|
||||
}
|
||||
|
||||
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
|
||||
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case slotInterval := <-ticker.C():
|
||||
if slotInterval.Interval > 0 {
|
||||
if s.validating() {
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot+1)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
|
||||
log.WithError(err).Error("Could not process new slot")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
s.UpdateHead(s.ctx, slotInterval.Slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
|
||||
@@ -156,13 +158,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
}
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice")
|
||||
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
@@ -85,9 +87,11 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
ExecProofsPool execproofs.PoolManager
|
||||
P2P p2p.Accessor
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
OperationNotifier operation.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
AttService *attestations.Service
|
||||
StateGen *stategen.State
|
||||
@@ -211,7 +215,9 @@ func (s *Service) Start() {
|
||||
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
|
||||
go s.spawnProcessAttestationsRoutine()
|
||||
go s.spawnFinalizedProofsPruningRoutine()
|
||||
go s.runLateBlockTasks()
|
||||
}
|
||||
|
||||
@@ -567,3 +573,46 @@ func fuluForkSlot() (primitives.Slot, error) {
|
||||
|
||||
return forkFuluSlot, nil
|
||||
}
|
||||
|
||||
// spawnFinalizedProofsPruningRoutine prunes the execution proofs pool at the start of every epoch.
// It removes proofs older than the finalized checkpoint to prevent unbounded
// memory growth.
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
|
||||
func (s *Service) spawnFinalizedProofsPruningRoutine() {
|
||||
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
defer ticker.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case slot := <-ticker.C():
|
||||
// Only prune at the start of each epoch
|
||||
if !slots.IsEpochStart(slot) {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedCheckpoint := s.FinalizedCheckpt()
|
||||
if finalizedCheckpoint == nil {
|
||||
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get finalized slot")
|
||||
continue
|
||||
}
|
||||
|
||||
// Prune proofs older than the finalized slot
|
||||
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"prunedCount": count,
|
||||
"finalizedSlot": finalizedSlot,
|
||||
}).Debug("Pruned finalized execution proofs")
|
||||
}
|
||||
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -46,6 +46,9 @@ const (
|
||||
|
||||
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
|
||||
DataColumnReceived = 12
|
||||
|
||||
// ExecutionProofReceived is sent after an execution proof object has been received from gossip or RPC.
|
||||
ExecutionProofReceived = 13
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -77,6 +80,11 @@ type BLSToExecutionChangeReceivedData struct {
|
||||
Change *ethpb.SignedBLSToExecutionChange
|
||||
}
|
||||
|
||||
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
|
||||
type ExecutionProofReceivedData struct {
|
||||
ExecutionProof *ethpb.ExecutionProof
|
||||
}
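
For context, a minimal sketch of how a consumer might react to this new event, assuming the usual operation-feed wiring already used for other operation events; the helper name watchExecutionProofs, the notifier parameter, and the log field are illustrative and not part of this change:

// Sketch only: hypothetical consumer of the ExecutionProofReceived event
// defined above, assuming the standard async/event feed plumbing and a
// package-level logrus logger named log.
func watchExecutionProofs(ctx context.Context, notifier operation.Notifier) {
	ch := make(chan *feed.Event, 1)
	sub := notifier.OperationFeed().Subscribe(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			if ev.Type != operation.ExecutionProofReceived {
				continue
			}
			if data, ok := ev.Data.(*operation.ExecutionProofReceivedData); ok {
				log.WithField("proofId", data.ExecutionProof.ProofId).Debug("Observed execution proof")
			}
		case <-ctx.Done():
			return
		}
	}
}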
|
||||
|
||||
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
|
||||
type BlobSidecarReceivedData struct {
|
||||
Blob *blocks.VerifiedROBlob
|
||||
|
||||
@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
sig := signed.Signature()
|
||||
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
|
||||
}
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
|
||||
if err != nil {
|
||||
@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
|
||||
// Merge beacon block, randao and attestations signatures into a set.
|
||||
set := bls.NewSet()
|
||||
set.Join(bSet).Join(rSet).Join(aSet)
|
||||
set.Join(rSet).Join(aSet)
|
||||
|
||||
if blk.Version() >= version.Capella {
|
||||
changes, err := signed.Block().Body().BLSToExecutionChanges()
|
||||
|
||||
@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "block signature", set.Descriptions[0])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[1])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[2])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[0])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[1])
|
||||
}
|
||||
|
||||
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
|
||||
|
||||
@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
|
||||
|
||||
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
|
||||
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
|
||||
|
||||
if oldestSlotFlagPtr != nil {
|
||||
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
|
||||
if oldestEpoch < sn.blockRetention {
|
||||
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
|
||||
sn.validOldestSlotPtr = oldestSlotFlagPtr
|
||||
} else {
|
||||
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
|
||||
|
||||
@@ -128,6 +128,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
|
||||
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
|
||||
|
||||
currentSlot := primitives.Slot(10000)
|
||||
currentFunc := func() primitives.Slot { return currentSlot }
|
||||
@@ -141,6 +144,7 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedCol primitives.Epoch
|
||||
name string
|
||||
input SyncNeeds
|
||||
current func() primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "basic initialization with no flags",
|
||||
@@ -174,13 +178,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
|
||||
blobRetentionFlag: 0,
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(10)
|
||||
return &slot
|
||||
}(),
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
expectValidOldest: true,
|
||||
expectedBlob: minBlobEpochs,
|
||||
expectedCol: minColEpochs,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
|
||||
@@ -210,6 +214,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
{
|
||||
name: "both blob retention flag and oldest slot set",
|
||||
blobRetentionFlag: minBlobEpochs + 5,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
oldestSlotFlagPtr: func() *primitives.Slot {
|
||||
slot := primitives.Slot(100)
|
||||
return &slot
|
||||
@@ -232,16 +239,27 @@ func TestSyncNeedsInitialize(t *testing.T) {
|
||||
expectedBlob: 5000,
|
||||
expectedCol: 5000,
|
||||
},
|
||||
{
|
||||
name: "regression for deneb start",
|
||||
blobRetentionFlag: 8212500,
|
||||
expectValidOldest: true,
|
||||
oldestSlotFlagPtr: &denebSlot,
|
||||
current: func() primitives.Slot {
|
||||
return fuluSlot + minSlots
|
||||
},
|
||||
expectedBlob: 8212500,
|
||||
expectedCol: 8212500,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
if tc.current == nil {
|
||||
tc.current = currentFunc
|
||||
}
|
||||
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that current, deneb, fulu are set correctly
|
||||
require.Equal(t, currentSlot, result.current())
|
||||
|
||||
// Check retention calculations
|
||||
require.Equal(t, tc.expectedBlob, result.blobRetention)
|
||||
require.Equal(t, tc.expectedCol, result.colRetention)
|
||||
|
||||
@@ -38,6 +38,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_spf13_afero//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -185,73 +186,162 @@ func (dcs *DataColumnStorage) WarmCache() {
|
||||
|
||||
highestStoredEpoch := primitives.Epoch(0)
|
||||
|
||||
// Walk the data column filesystem to warm up the cache.
|
||||
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
|
||||
if fileErr != nil {
|
||||
return fileErr
|
||||
}
|
||||
|
||||
// If not a leaf, skip.
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract metadata from the file path.
|
||||
fileMetadata, err := extractFileMetadata(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while extracting file metadata")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open the data column filesystem file.
|
||||
f, err := dcs.fs.Open(path)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while opening data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
defer func() {
|
||||
// Overwrite the existing error only if it is nil, since the close error is less important.
|
||||
closeErr := f.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Read the metadata of the file.
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check the indices.
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}
|
||||
|
||||
// Update the highest stored epoch.
|
||||
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)
|
||||
|
||||
// Set the ident in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("Error encountered while walking data column filesystem.")
|
||||
// List all period directories
|
||||
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading top directory during warm cache")
|
||||
return
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem.
|
||||
// Iterate through periods
|
||||
for _, periodFileInfo := range periodFileInfos {
|
||||
if !periodFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
periodPath := periodFileInfo.Name()
|
||||
|
||||
// List all epoch directories in this period
|
||||
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
// Iterate through epochs
|
||||
for _, epochFileInfo := range epochFileInfos {
|
||||
if !epochFileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
epochPath := path.Join(periodPath, epochFileInfo.Name())
|
||||
|
||||
// List all .sszs files in this epoch
|
||||
files, err := listEpochFiles(dcs.fs, epochPath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Process all files in this epoch in parallel
|
||||
epochHighest, err := dcs.processEpochFiles(files)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
|
||||
}
|
||||
|
||||
highestStoredEpoch = max(highestStoredEpoch, epochHighest)
|
||||
}
|
||||
}
|
||||
|
||||
// Prune the cache and the filesystem
|
||||
dcs.prune()
|
||||
|
||||
log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
|
||||
totalElapsed := time.Since(start)
|
||||
|
||||
// Log summary
|
||||
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// listEpochFiles lists all .sszs files in an epoch directory.
|
||||
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
|
||||
fileInfos, err := afero.ReadDir(fs, epochPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read epoch directory")
|
||||
}
|
||||
|
||||
files := make([]string, 0, len(fileInfos))
|
||||
for _, fileInfo := range fileInfos {
|
||||
if fileInfo.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := fileInfo.Name()
|
||||
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
|
||||
files = append(files, path.Join(epochPath, fileName))
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
|
||||
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
|
||||
var (
|
||||
eg errgroup.Group
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
for _, filePath := range files {
|
||||
eg.Go(func() error {
|
||||
epoch, err := dcs.processFile(filePath)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return highestEpoch, err
|
||||
}
|
||||
|
||||
return highestEpoch, nil
|
||||
}
|
||||
|
||||
// processFile processes a single .sszs file.
|
||||
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
|
||||
// Extract metadata from the file path
|
||||
fileMetadata, err := extractFileMetadata(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "extract file metadata")
|
||||
}
|
||||
|
||||
// Open the file (each goroutine gets its own FD)
|
||||
f, err := dcs.fs.Open(filePath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "open file")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := f.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read metadata
|
||||
metadata, err := dcs.metadata(f)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "read metadata")
|
||||
}
|
||||
|
||||
// Extract indices
|
||||
indices := metadata.indices.all()
|
||||
if len(indices) == 0 {
|
||||
return fileMetadata.epoch, nil // No indices, skip
|
||||
}
|
||||
|
||||
// Build ident and set in cache (thread-safe)
|
||||
dataColumnsIdent := DataColumnsIdent{
|
||||
Root: fileMetadata.blockRoot,
|
||||
Epoch: fileMetadata.epoch,
|
||||
Indices: indices,
|
||||
}
|
||||
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
return 0, errors.Wrap(err, "cache set")
|
||||
}
|
||||
|
||||
return fileMetadata.epoch, nil
|
||||
}
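
One design note on the fan-out above: eg.Go is called once per file with no concurrency cap, so an epoch with many .sszs files opens that many file descriptors at once. A hedged sketch of a bounded variant reusing the same helpers follows; the method name and the limit parameter are illustrative, not part of this change:

// Sketch only: same logic as processEpochFiles, but using errgroup's SetLimit
// (provided by the org_golang_x_sync errgroup dependency added above) to cap
// the number of concurrently open files.
func (dcs *DataColumnStorage) processEpochFilesBounded(files []string, limit int) (primitives.Epoch, error) {
	var (
		eg errgroup.Group
		mu sync.Mutex
	)
	eg.SetLimit(limit)

	highestEpoch := primitives.Epoch(0)
	for _, filePath := range files {
		eg.Go(func() error {
			epoch, err := dcs.processFile(filePath)
			if err != nil {
				log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
				return nil
			}

			mu.Lock()
			defer mu.Unlock()
			highestEpoch = max(highestEpoch, epoch)
			return nil
		})
	}

	return highestEpoch, eg.Wait()
}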
|
||||
|
||||
// Summary returns the DataColumnStorageSummary.
|
||||
|
||||
@@ -37,6 +37,7 @@ go_library(
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/node/registration"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -102,6 +103,7 @@ type BeaconNode struct {
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
execProofsPool execproofs.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
@@ -156,6 +158,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
slashingsPool: slashings.NewPool(),
|
||||
syncCommitteePool: synccommittee.NewPool(),
|
||||
blsToExecPool: blstoexec.NewPool(),
|
||||
execProofsPool: execproofs.NewPool(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
payloadIDCache: cache.NewPayloadIDCache(),
|
||||
slasherBlockHeadersFeed: new(event.Feed),
|
||||
@@ -737,6 +740,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithExitPool(b.exitPool),
|
||||
blockchain.WithSlashingPool(b.slashingsPool),
|
||||
blockchain.WithBLSToExecPool(b.blsToExecPool),
|
||||
blockchain.WithExecProofsPool(b.execProofsPool),
|
||||
blockchain.WithP2PBroadcaster(b.fetchP2P()),
|
||||
blockchain.WithStateNotifier(b),
|
||||
blockchain.WithAttestationService(attService),
|
||||
@@ -752,6 +756,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
blockchain.WithOperationNotifier(b),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -827,6 +832,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithSlashingPool(b.slashingsPool),
|
||||
regularsync.WithSyncCommsPool(b.syncCommitteePool),
|
||||
regularsync.WithBlsToExecPool(b.blsToExecPool),
|
||||
regularsync.WithExecProofPool(b.execProofsPool),
|
||||
regularsync.WithStateGen(b.stateGen),
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
|
||||
16
beacon-chain/operations/execproofs/BUILD.bazel
Normal file
@@ -0,0 +1,16 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["pool.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
],
|
||||
)
|
||||
174
beacon-chain/operations/execproofs/pool.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package execproofs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
// ProofKey uniquely identifies an execution proof by block root and proof type.
|
||||
type ProofKey struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
ProofId primitives.ExecutionProofId
|
||||
}
|
||||
|
||||
// String returns a string representation for logging.
|
||||
func (k ProofKey) String() string {
|
||||
return fmt.Sprintf("root=%#x,proofId=%d", k.Root, k.ProofId)
|
||||
}
|
||||
|
||||
var (
|
||||
execProofInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "exec_proof_pool_total",
|
||||
Help: "The number of execution proofs in the operation pool.",
|
||||
})
|
||||
)
|
||||
|
||||
var _ PoolManager = (*ExecProofPool)(nil)
|
||||
|
||||
// PoolManager maintains execution proofs received via gossip.
|
||||
// These proofs are used for data availability checks when importing blocks.
|
||||
// Lightweight verifier nodes need a minimum number of proofs from different zkVM types
|
||||
// to verify block execution correctness.
|
||||
type PoolManager interface {
|
||||
// Insert inserts a proof into the pool.
|
||||
// If a proof with the same block root and proof ID already exists, it is not added again.
|
||||
Insert(executionProof *ethpb.ExecutionProof)
|
||||
|
||||
// Get returns a copy of all proofs for a specific block root
|
||||
Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof
|
||||
|
||||
// Ids returns the list of (unique) proof types available for a specific block root
|
||||
Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId
|
||||
|
||||
// Count counts the number of proofs for a specific block root
|
||||
Count(blockRoot [fieldparams.RootLength]byte) uint64
|
||||
|
||||
// Exists checks if a proof exists for the given block root and proof ID
|
||||
Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool
|
||||
|
||||
// PruneUpTo removes proofs older than the target slot
|
||||
PruneUpTo(targetSlot primitives.Slot) int
|
||||
}
|
||||
|
||||
// ExecProofPool is a concrete implementation of the PoolManager interface.
|
||||
type ExecProofPool struct {
|
||||
lock sync.RWMutex
|
||||
m map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof
|
||||
}
|
||||
|
||||
// NewPool returns an initialized pool.
|
||||
func NewPool() *ExecProofPool {
|
||||
return &ExecProofPool{
|
||||
m: make(map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof),
|
||||
}
|
||||
}
|
||||
|
||||
// Insert inserts a proof into the pool.
|
||||
// If a proof with the same block root and proof ID already exists, it is not added again.
|
||||
func (p *ExecProofPool) Insert(proof *ethpb.ExecutionProof) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(proof.BlockRoot)
|
||||
|
||||
// Create the inner map if it doesn't exist
|
||||
if p.m[blockRoot] == nil {
|
||||
p.m[blockRoot] = make(map[primitives.ExecutionProofId]*ethpb.ExecutionProof)
|
||||
}
|
||||
|
||||
// Check if proof already exists
|
||||
if _, exists := p.m[blockRoot][proof.ProofId]; exists {
|
||||
return
|
||||
}
|
||||
|
||||
// Insert new proof
|
||||
p.m[blockRoot][proof.ProofId] = proof
|
||||
execProofInPoolTotal.Inc()
|
||||
}
|
||||
|
||||
// Get returns a copy of all proofs for a specific block root
|
||||
func (p *ExecProofPool) Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
proofsByType, exists := p.m[blockRoot]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]*ethpb.ExecutionProof, 0, len(proofsByType))
|
||||
for _, proof := range proofsByType {
|
||||
result = append(result, proof.Copy())
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (p *ExecProofPool) Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
proofById, exists := p.m[blockRoot]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
ids := make([]primitives.ExecutionProofId, 0, len(proofById))
|
||||
for id := range proofById {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count counts the number of proofs for a specific block root
|
||||
func (p *ExecProofPool) Count(blockRoot [fieldparams.RootLength]byte) uint64 {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
return uint64(len(p.m[blockRoot]))
|
||||
}
|
||||
|
||||
// Exists checks if a proof exists for the given block root and proof ID
|
||||
func (p *ExecProofPool) Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
|
||||
proofsByType, exists := p.m[blockRoot]
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
|
||||
_, exists = proofsByType[proofId]
|
||||
return exists
|
||||
}
|
||||
|
||||
// PruneUpTo removes proofs older than the given slot
|
||||
func (p *ExecProofPool) PruneUpTo(targetSlot primitives.Slot) int {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
pruned := 0
|
||||
for blockRoot, proofsByType := range p.m {
|
||||
for proofId, proof := range proofsByType {
|
||||
if proof.Slot < targetSlot {
|
||||
delete(proofsByType, proofId)
|
||||
execProofInPoolTotal.Dec()
|
||||
pruned++
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up empty inner maps
|
||||
if len(proofsByType) == 0 {
|
||||
delete(p.m, blockRoot)
|
||||
}
|
||||
}
|
||||
|
||||
return pruned
|
||||
}
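
A minimal usage sketch of the pool above, as a caller on the gossip or data-availability path might exercise it; the helper names and the required threshold are illustrative, and a real caller would take the threshold from the beacon config (MinProofsRequired), as in the data-availability test earlier in this diff:

// Sketch only: hypothetical helpers exercising the PoolManager surface added above.
func trackProof(pool execproofs.PoolManager, proof *ethpb.ExecutionProof, required uint64) bool {
	pool.Insert(proof) // duplicate (blockRoot, proofId) pairs are ignored
	root := bytesutil.ToBytes32(proof.BlockRoot)
	return pool.Count(root) >= required // enough distinct zkVM proof types seen for this root
}

func pruneFinalizedProofs(pool execproofs.PoolManager, finalizedSlot primitives.Slot) {
	if pruned := pool.PruneUpTo(finalizedSlot); pruned > 0 {
		log.WithField("prunedCount", pruned).Debug("Pruned finalized execution proofs")
	}
}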
|
||||
@@ -165,6 +165,7 @@ go_test(
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -589,6 +589,11 @@ func (s *Service) createLocalNode(
|
||||
localNode.Set(quicEntry)
|
||||
}
|
||||
|
||||
if features.Get().EnableZkvm {
|
||||
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
|
||||
localNode.Set(zkvmKeyEntry)
|
||||
}
|
||||
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
@@ -243,12 +244,19 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
name string
|
||||
cfg *Config
|
||||
expectedError bool
|
||||
zkvmEnabled bool
|
||||
}{
|
||||
{
|
||||
name: "valid config",
|
||||
cfg: &Config{},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "valid config with zkVM enabled",
|
||||
cfg: &Config{},
|
||||
expectedError: false,
|
||||
zkvmEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "invalid host address",
|
||||
cfg: &Config{HostAddress: "invalid"},
|
||||
@@ -273,6 +281,15 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.zkvmEnabled {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
resetCfg()
|
||||
})
|
||||
}
|
||||
|
||||
// Define ports. Use unique ports since this test validates ENR content.
|
||||
const (
|
||||
udpPort = 3100
|
||||
@@ -348,6 +365,14 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
custodyGroupCount := new(uint64)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
|
||||
require.Equal(t, custodyRequirement, *custodyGroupCount)
|
||||
|
||||
// Check zkVM enabled key if applicable.
|
||||
if tt.zkvmEnabled {
|
||||
zkvmEnabled := new(bool)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
|
||||
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,6 +52,9 @@ const (
|
||||
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
|
||||
// our light client finality update topic.
|
||||
lightClientFinalityUpdateWeight = 0.05
|
||||
// executionProofWeight specifies the scoring weight that we apply to
|
||||
// our execution proof topic.
|
||||
executionProofWeight = 0.05
|
||||
|
||||
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
|
||||
maxInMeshScore = 10
|
||||
@@ -145,6 +148,8 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultLightClientOptimisticUpdateTopicParams(), nil
|
||||
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
|
||||
return defaultLightClientFinalityUpdateTopicParams(), nil
|
||||
case strings.Contains(topic, GossipExecutionProofMessage):
|
||||
return defaultExecutionProofTopicParams(), nil
|
||||
default:
|
||||
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
|
||||
}
|
||||
@@ -510,6 +515,28 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
|
||||
return &pubsub.TopicScoreParams{
|
||||
TopicWeight: executionProofWeight,
|
||||
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
|
||||
TimeInMeshQuantum: inMeshTime(),
|
||||
TimeInMeshCap: inMeshCap(),
|
||||
FirstMessageDeliveriesWeight: 2,
|
||||
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
|
||||
FirstMessageDeliveriesCap: 5,
|
||||
MeshMessageDeliveriesWeight: 0,
|
||||
MeshMessageDeliveriesDecay: 0,
|
||||
MeshMessageDeliveriesCap: 0,
|
||||
MeshMessageDeliveriesThreshold: 0,
|
||||
MeshMessageDeliveriesWindow: 0,
|
||||
MeshMessageDeliveriesActivation: 0,
|
||||
MeshFailurePenaltyWeight: 0,
|
||||
MeshFailurePenaltyDecay: 0,
|
||||
InvalidMessageDeliveriesWeight: -2000,
|
||||
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
|
||||
return &pubsub.TopicScoreParams{
|
||||
TopicWeight: lightClientOptimisticUpdateWeight,
|
||||
|
||||
@@ -25,6 +25,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
|
||||
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return ðpb.LightClientOptimisticUpdateAltair{} },
|
||||
LightClientFinalityUpdateTopicFormat: func() proto.Message { return ðpb.LightClientFinalityUpdateAltair{} },
|
||||
DataColumnSubnetTopicFormat: func() proto.Message { return ðpb.DataColumnSidecar{} },
|
||||
ExecutionProofSubnetTopicFormat: func() proto.Message { return ðpb.ExecutionProof{} },
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
|
||||
@@ -602,6 +602,33 @@ func (p *Status) All() []peer.ID {
|
||||
return pids
|
||||
}
|
||||
|
||||
// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
|
||||
func (p *Status) ZkvmEnabledPeers() []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState != Connected {
|
||||
continue
|
||||
}
|
||||
if peerData.Enr == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var enabled bool
|
||||
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
|
||||
if err := peerData.Enr.Load(entry); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if enabled {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// Prune clears out and removes outdated and disconnected peers.
|
||||
func (p *Status) Prune() {
|
||||
p.store.Lock()
|
||||
|
||||
@@ -1341,3 +1341,75 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
|
||||
p.SetConnectionState(id, state)
|
||||
return id
|
||||
}
|
||||
|
||||
func TestZkvmEnabledPeers(t *testing.T) {
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Create peer 1: Connected, zkVM enabled
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
record1 := new(enr.Record)
|
||||
zkvmEnabled := true
|
||||
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
|
||||
p.Add(record1, pid1, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid1, peers.Connected)
|
||||
|
||||
// Create peer 2: Connected, zkVM disabled
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
record2 := new(enr.Record)
|
||||
zkvmDisabled := false
|
||||
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
|
||||
p.Add(record2, pid2, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid2, peers.Connected)
|
||||
|
||||
// Create peer 3: Connected, zkVM enabled
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
record3 := new(enr.Record)
|
||||
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
|
||||
p.Add(record3, pid3, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid3, peers.Connected)
|
||||
|
||||
// Create peer 4: Disconnected, zkVM enabled (should not be included)
|
||||
pid4 := addPeer(t, p, peers.Disconnected)
|
||||
record4 := new(enr.Record)
|
||||
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
|
||||
p.Add(record4, pid4, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid4, peers.Disconnected)
|
||||
|
||||
// Create peer 5: Connected, no ENR (should not be included)
|
||||
pid5 := addPeer(t, p, peers.Connected)
|
||||
p.Add(nil, pid5, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid5, peers.Connected)
|
||||
|
||||
// Create peer 6: Connected, no zkVM key in ENR (should not be included)
|
||||
pid6 := addPeer(t, p, peers.Connected)
|
||||
record6 := new(enr.Record)
|
||||
record6.Set(enr.WithEntry("other_key", "other_value"))
|
||||
p.Add(record6, pid6, nil, network.DirOutbound)
|
||||
p.SetConnectionState(pid6, peers.Connected)
|
||||
|
||||
// Get zkVM enabled peers
|
||||
zkvmPeers := p.ZkvmEnabledPeers()
|
||||
|
||||
// Should return only pid1 and pid3 (connected peers with zkVM enabled)
|
||||
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
|
||||
|
||||
// Verify the returned peers are correct
|
||||
zkvmPeerMap := make(map[peer.ID]bool)
|
||||
for _, pid := range zkvmPeers {
|
||||
zkvmPeerMap[pid] = true
|
||||
}
|
||||
|
||||
assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
|
||||
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
|
||||
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
|
||||
}
|
||||
|
||||
@@ -67,6 +67,9 @@ const (
|
||||
|
||||
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
|
||||
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
|
||||
|
||||
// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
|
||||
ExecutionProofsByRootName = "/execution_proofs_by_root"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -106,6 +109,9 @@ const (
|
||||
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
|
||||
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
|
||||
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
|
||||
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
|
||||
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCStatusTopicV2 defines the v2 topic for the status rpc method.
|
||||
@@ -170,6 +176,9 @@ var (
|
||||
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
|
||||
|
||||
// ExecutionProofsByRoot v1 Message
|
||||
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
@@ -193,6 +202,7 @@ var (
|
||||
LightClientOptimisticUpdateName: true,
|
||||
DataColumnSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRangeName: true,
|
||||
ExecutionProofsByRootName: true,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to be updated in Altair.
|
||||
|
||||
@@ -36,6 +36,7 @@ var (
|
||||
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
|
||||
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
|
||||
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
|
||||
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
|
||||
)
|
||||
|
||||
// The value used with the subnet, in order
|
||||
|
||||
@@ -46,6 +46,8 @@ const (
|
||||
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
|
||||
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
|
||||
GossipDataColumnSidecarMessage = "data_column_sidecar"
|
||||
// GossipExecutionProofMessage is the name for the execution proof message type.
|
||||
GossipExecutionProofMessage = "execution_proof"
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
@@ -75,6 +77,8 @@ const (
|
||||
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
|
||||
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
|
||||
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
|
||||
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
|
||||
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (the PoC only has one global topic)
|
||||
)
|
||||
|
||||
// topic is a struct representing a single gossipsub topic.
|
||||
@@ -158,6 +162,7 @@ func (s *Service) allTopics() []topic {
|
||||
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
|
||||
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
|
||||
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
|
||||
newTopic(fulu, future, empty, GossipExecutionProofMessage),
|
||||
}
|
||||
last := params.GetNetworkScheduleEntry(genesis)
|
||||
schedule := []params.NetworkScheduleEntry{last}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
|
||||
@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
}
|
||||
}
|
||||
}
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash block")
|
||||
}
|
||||
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not verify block signature")
|
||||
}
|
||||
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
|
||||
@@ -169,6 +169,11 @@ func TestGetSpec(t *testing.T) {
|
||||
config.BlobsidecarSubnetCountElectra = 102
|
||||
config.SyncMessageDueBPS = 103
|
||||
|
||||
// EIP-8025
|
||||
config.MaxProofDataBytes = 200
|
||||
config.MinEpochsForExecutionProofRequests = 201
|
||||
config.MinProofsRequired = 202
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
config.DomainBeaconProposer = dbp
|
||||
@@ -205,7 +210,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 175, len(data))
|
||||
assert.Equal(t, 178, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -577,6 +582,12 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "103", v)
|
||||
case "MAX_PROOF_DATA_BYTES":
|
||||
assert.Equal(t, "200", v)
|
||||
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
|
||||
assert.Equal(t, "201", v)
|
||||
case "MIN_PROOFS_REQUIRED":
|
||||
assert.Equal(t, "202", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
blobSchedule, ok := v.([]any)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"exec_proofs.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"log.go",
|
||||
@@ -31,6 +32,7 @@ go_library(
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_execution_proofs_by_root_topic.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_light_client.go",
|
||||
"rpc_metadata.go",
|
||||
@@ -46,6 +48,7 @@ go_library(
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_execution_proofs.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
@@ -57,6 +60,7 @@ go_library(
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
"validate_data_column.go",
|
||||
"validate_execution_proof.go",
|
||||
"validate_light_client.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
@@ -93,6 +97,7 @@ go_library(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
@@ -187,6 +192,7 @@ go_test(
|
||||
"rpc_blob_sidecars_by_root_test.go",
|
||||
"rpc_data_column_sidecars_by_range_test.go",
|
||||
"rpc_data_column_sidecars_by_root_test.go",
|
||||
"rpc_execution_proofs_by_root_topic_test.go",
|
||||
"rpc_goodbye_test.go",
|
||||
"rpc_handler_test.go",
|
||||
"rpc_light_client_test.go",
|
||||
@@ -211,6 +217,7 @@ go_test(
|
||||
"validate_blob_test.go",
|
||||
"validate_bls_to_execution_change_test.go",
|
||||
"validate_data_column_test.go",
|
||||
"validate_execution_proof_test.go",
|
||||
"validate_light_client_test.go",
|
||||
"validate_proposer_slashing_test.go",
|
||||
"validate_sync_committee_message_test.go",
|
||||
@@ -244,6 +251,7 @@ go_test(
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/execproofs:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/slashings/mock:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
|
||||
65
beacon-chain/sync/exec_proofs.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// generateExecProof returns a dummy execution proof after the specified delay.
|
||||
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
|
||||
// Simulate proof generation work
|
||||
time.Sleep(delay)
|
||||
|
||||
// Create a dummy proof with some deterministic data
|
||||
block := roBlock.Block()
|
||||
if block == nil {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return nil, errors.New("nil block body")
|
||||
}
|
||||
|
||||
executionData, err := body.Execution()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("execution: %w", err)
|
||||
}
|
||||
|
||||
if executionData == nil {
|
||||
return nil, errors.New("nil execution data")
|
||||
}
|
||||
|
||||
hash, err := executionData.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("hash tree root: %w", err)
|
||||
}
|
||||
|
||||
proofData := []byte{
|
||||
0xFF, // Magic byte for dummy proof
|
||||
byte(proofID),
|
||||
// Include some payload hash bytes
|
||||
hash[0],
|
||||
hash[1],
|
||||
hash[2],
|
||||
hash[3],
|
||||
}
|
||||
|
||||
blockRoot := roBlock.Root()
|
||||
|
||||
proof := ðpb.ExecutionProof{
|
||||
ProofId: proofID,
|
||||
Slot: block.Slot(),
|
||||
BlockHash: hash[:],
|
||||
BlockRoot: blockRoot[:],
|
||||
ProofData: proofData,
|
||||
}
|
||||
|
||||
return proof, nil
|
||||
}
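The dummy ProofData produced above is six bytes: a 0xFF magic byte, the proof ID, and the first four bytes of the execution payload's hash tree root (also stored as BlockHash). The helper below is an illustrative sketch, not part of this change, showing the inverse check a consumer of these PoC proofs could run.

// checkDummyProof is an illustrative sketch (not part of this change) that
// verifies the PoC layout produced by generateExecProof:
// [0xFF, byte(proofID), hash[0], hash[1], hash[2], hash[3]].
// Assumes the same package imports as exec_proofs.go plus "bytes".
func checkDummyProof(proof *ethpb.ExecutionProof) error {
	data := proof.ProofData
	if len(data) != 6 {
		return fmt.Errorf("unexpected dummy proof data length: %d", len(data))
	}
	if data[0] != 0xFF {
		return errors.New("missing dummy proof magic byte")
	}
	if primitives.ExecutionProofId(data[1]) != proof.ProofId {
		return errors.New("proof id mismatch")
	}
	if len(proof.BlockHash) < 4 || !bytes.Equal(data[2:6], proof.BlockHash[:4]) {
		return errors.New("payload hash prefix mismatch")
	}
	return nil
}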
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -88,6 +89,13 @@ func WithBlsToExecPool(blsToExecPool blstoexec.PoolManager) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func WithExecProofPool(execProofPool execproofs.PoolManager) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.execProofPool = execProofPool
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithChainService(chain blockchainService) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.chain = chain
|
||||
|
||||
@@ -259,6 +259,10 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
|
||||
return errors.Wrap(err, "request and save missing data column sidecars")
|
||||
}
|
||||
|
||||
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
|
||||
return errors.Wrap(err, "request and save missing execution proofs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -100,6 +100,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
||||
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
|
||||
// DataColumnSidecarsByRangeV1
|
||||
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
|
||||
|
||||
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
||||
// ExecutionProofsByRootV1
|
||||
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
|
||||
|
||||
// General topic for all rpc requests.
|
||||
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestNewRateLimiter(t *testing.T) {
|
||||
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
|
||||
}
|
||||
|
||||
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
|
||||
|
||||
@@ -51,6 +51,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
|
||||
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
|
||||
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
|
||||
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
|
||||
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -11,11 +11,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
@@ -87,9 +89,77 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
|
||||
return errors.Wrap(err, "request and save missing data columns")
|
||||
}
|
||||
|
||||
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
|
||||
return errors.Wrap(err, "request and save missing execution proofs")
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
|
||||
if len(blks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Parallelize requests for multiple blocks.
|
||||
for _, blk := range blks {
|
||||
if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
|
||||
if !features.Get().EnableZkvm {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check proof retention period.
|
||||
blockEpoch := slots.ToEpoch(block.Block().Slot())
|
||||
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
|
||||
if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check how many proofs are still needed, based on what the execution proof pool already holds.
|
||||
storedIds := s.cfg.execProofPool.Ids(block.Root())
|
||||
count := uint64(len(storedIds))
|
||||
if count >= params.BeaconConfig().MinProofsRequired {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct request
|
||||
blockRoot := block.Root()
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: params.BeaconConfig().MinProofsRequired - count,
|
||||
AlreadyHave: storedIds,
|
||||
}
|
||||
|
||||
// Call SendExecutionProofsByRootRequest
|
||||
zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
|
||||
if len(zkvmEnabledPeers) == 0 {
|
||||
return errors.New("no zkVM enabled peers available to request execution proofs")
|
||||
}
|
||||
|
||||
// TODO: For simplicity, just pick the first peer for now.
|
||||
// In the future, we can implement better peer selection logic.
|
||||
pid := zkvmEnabledPeers[0]
|
||||
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("send execution proofs by root request: %w", err)
|
||||
}
|
||||
|
||||
// Insert the fetched proofs into the execution proof pool.
|
||||
// TODO: Implement multiple proof insertion in ExecProofPool to avoid multiple locks.
|
||||
for _, proof := range proofs {
|
||||
s.cfg.execProofPool.Insert(proof)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
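The retention check above calls params.WithinExecutionProofPeriod, which is not included in this diff. Presumably it mirrors the blob and data column retention helpers and compares the block's epoch against MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS (set to 201 in the TestGetSpec changes above). The sketch below is only an assumption about that shape, using the usual params and primitives imports.

// withinExecutionProofPeriodSketch is an assumption about the helper's
// semantics, not the actual implementation in config/params: proofs are
// still requestable while the block is within the retention window.
func withinExecutionProofPeriodSketch(blockEpoch, currentEpoch primitives.Epoch) bool {
	if currentEpoch < blockEpoch {
		// Block is ahead of our clock; treat it as within the window.
		return true
	}
	window := primitives.Epoch(params.BeaconConfig().MinEpochsForExecutionProofRequests)
	return currentEpoch-blockEpoch <= window
}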
|
||||
|
||||
// requestAndSaveMissingDataColumnSidecars checks if the data columns are missing for the given block.
|
||||
// If so, requests them and saves them to the storage.
|
||||
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
|
||||
|
||||
@@ -182,3 +182,21 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
|
||||
// Success response code.
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
return errors.Wrap(err, "stream write")
|
||||
}
|
||||
ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
|
||||
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
|
||||
return errors.Wrap(err, "write context to stream")
|
||||
}
|
||||
|
||||
// Execution proof.
|
||||
if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
|
||||
return errors.Wrap(err, "encode with max length")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
219
beacon-chain/sync/rpc_execution_proofs_by_root_topic.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
|
||||
func SendExecutionProofsByRootRequest(
|
||||
ctx context.Context,
|
||||
clock blockchain.TemporalOracle,
|
||||
p2pProvider p2p.P2P,
|
||||
pid peer.ID,
|
||||
req *ethpb.ExecutionProofsByRootRequest,
|
||||
) ([]*ethpb.ExecutionProof, error) {
|
||||
// Validate request
|
||||
if req.CountNeeded == 0 {
|
||||
return nil, errors.New("count_needed must be greater than 0")
|
||||
}
|
||||
|
||||
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"block_root": bytesutil.ToBytes32(req.BlockRoot),
|
||||
"count": req.CountNeeded,
|
||||
"already": len(req.AlreadyHave),
|
||||
}).Debug("Sending execution proofs by root request")
|
||||
|
||||
stream, err := p2pProvider.Send(ctx, req, topic, pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer closeStream(stream, log)
|
||||
|
||||
// Read execution proofs from stream
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
|
||||
alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
|
||||
for _, id := range req.AlreadyHave {
|
||||
alreadyHaveSet[id] = struct{}{}
|
||||
}
|
||||
|
||||
for i := uint64(0); i < req.CountNeeded; i++ {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Validate proof
|
||||
if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
return proofs, nil
|
||||
}
|
||||
|
||||
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
|
||||
func ReadChunkedExecutionProof(
|
||||
stream libp2pcore.Stream,
|
||||
encoding p2p.EncodingProvider,
|
||||
isFirstChunk bool,
|
||||
) (*ethpb.ExecutionProof, error) {
|
||||
// Read status code for each chunk (like data columns, not like blocks)
|
||||
code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if code != 0 {
|
||||
return nil, errors.New(errMsg)
|
||||
}
|
||||
|
||||
// Read context bytes (fork digest)
|
||||
_, err = readContextFromStream(stream)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read context from stream: %w", err)
|
||||
}
|
||||
|
||||
// Decode the proof
|
||||
proof := ðpb.ExecutionProof{}
|
||||
if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return proof, nil
|
||||
}
|
||||
|
||||
// validateExecutionProof validates a received execution proof against the request.
|
||||
func validateExecutionProof(
|
||||
proof *ethpb.ExecutionProof,
|
||||
req *ethpb.ExecutionProofsByRootRequest,
|
||||
alreadyHaveSet map[primitives.ExecutionProofId]struct{},
|
||||
) error {
|
||||
// Check block root matches
|
||||
proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
|
||||
reqRoot := bytesutil.ToBytes32(req.BlockRoot)
|
||||
if proofRoot != reqRoot {
|
||||
return fmt.Errorf("proof block root %#x does not match requested root %#x",
|
||||
proofRoot, reqRoot)
|
||||
}
|
||||
|
||||
// Check we didn't already have this proof
|
||||
if _, ok := alreadyHaveSet[proof.ProofId]; ok {
|
||||
return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
|
||||
}
|
||||
|
||||
// Check proof ID is valid (within max range)
|
||||
if !proof.ProofId.IsValid() {
|
||||
return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
|
||||
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
|
||||
defer span.End()
|
||||
|
||||
_, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
defer cancel()
|
||||
|
||||
req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
|
||||
if !ok {
|
||||
return errors.New("message is not type ExecutionProofsByRootRequest")
|
||||
}
|
||||
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
|
||||
// Check the rate limiter before processing the request.
|
||||
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Penalize peers that send invalid requests.
|
||||
if err := validateExecutionProofsByRootRequest(req); err != nil {
|
||||
s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||
return fmt.Errorf("validate execution proofs by root request: %w", err)
|
||||
}
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(req.BlockRoot)
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"blockroot": fmt.Sprintf("%#x", blockRoot),
|
||||
"neededCount": req.CountNeeded,
|
||||
"alreadyHave": req.AlreadyHave,
|
||||
"peer": remotePeer.String(),
|
||||
})
|
||||
|
||||
s.rateLimiter.add(stream, 1)
|
||||
defer closeStream(stream, log)
|
||||
|
||||
// Get proofs from execution proof pool
|
||||
storedProofs := s.cfg.execProofPool.Get(blockRoot)
|
||||
|
||||
// Build the set of proofs the requester already has, so they can be skipped below.
|
||||
alreadyHave := make(map[primitives.ExecutionProofId]bool)
|
||||
for _, id := range req.AlreadyHave {
|
||||
alreadyHave[id] = true
|
||||
}
|
||||
|
||||
// Send proofs
|
||||
sentCount := uint64(0)
|
||||
for _, proof := range storedProofs {
|
||||
if sentCount >= req.CountNeeded {
|
||||
break
|
||||
}
|
||||
|
||||
// Skip proofs the requester already has
|
||||
if alreadyHave[proof.ProofId] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Write proof to stream
|
||||
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
|
||||
log.WithError(err).Debug("Could not send execution proof")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
|
||||
return err
|
||||
}
|
||||
|
||||
sentCount++
|
||||
}
|
||||
|
||||
log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
|
||||
if req.CountNeeded == 0 {
|
||||
return errors.New("count_needed must be greater than 0")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
727
beacon-chain/sync/rpc_execution_proofs_by_root_topic_test.go
Normal file
@@ -0,0 +1,727 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
chainMock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestExecutionProofsByRootRPCHandler(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
t.Run("wrong message type", func(t *testing.T) {
|
||||
service := &Service{}
|
||||
err := service.executionProofsByRootRPCHandler(t.Context(), nil, nil)
|
||||
require.ErrorContains(t, "message is not type ExecutionProofsByRootRequest", err)
|
||||
})
|
||||
|
||||
t.Run("invalid request - count_needed is 0", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
service := &Service{cfg: &config{p2p: localP2P}, rateLimiter: newRateLimiter(localP2P)}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, errMsg, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, responseCodeInvalidRequest, code)
|
||||
require.Equal(t, "count_needed must be greater than 0", errMsg)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 0, // Invalid: must be > 0
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("zkVM disabled - returns empty", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: false, // Disabled
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
execProofPool := execproofs.NewPool()
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
execProofPool: execProofPool,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
// Should receive no proofs (stream should end)
|
||||
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no proofs available", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
execProofPool := execproofs.NewPool()
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
execProofPool: execProofPool,
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
// Should receive no proofs (stream should end)
|
||||
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
|
||||
require.ErrorIs(t, err, io.EOF)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nominal - returns requested proofs", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with some proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
// Add 3 proofs for the same block
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
proof3 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(3),
|
||||
ProofData: []byte("proof3"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
execProofPool.Insert(proof3)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 2)
|
||||
|
||||
for i := range 2 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
// Should receive proof1 and proof2 (first 2 in pool)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("filters already_have proofs", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with some proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
// Add 4 proofs for the same block
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
proof3 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(3),
|
||||
ProofData: []byte("proof3"),
|
||||
}
|
||||
proof4 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(4),
|
||||
ProofData: []byte("proof4"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
execProofPool.Insert(proof3)
|
||||
execProofPool.Insert(proof4)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 2)
|
||||
|
||||
for i := range 3 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
// Should skip proof1 and proof2 (already_have), and return proof3 and proof4
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(3), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(4), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{1, 2}, // Already have proof1 and proof2
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("partial send - less proofs than requested", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableZkvm: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
// Create execution proof pool with only 2 proofs
|
||||
execProofPool := execproofs.NewPool()
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
|
||||
execProofPool.Insert(proof1)
|
||||
execProofPool.Insert(proof2)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := &Service{
|
||||
cfg: &config{
|
||||
p2p: localP2P,
|
||||
beaconDB: beaconDB,
|
||||
clock: clock,
|
||||
execProofPool: execProofPool,
|
||||
chain: &chainMock.ChainService{},
|
||||
},
|
||||
rateLimiter: newRateLimiter(localP2P),
|
||||
}
|
||||
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
proofs := make([]*ethpb.ExecutionProof, 0, 5)
|
||||
|
||||
for i := range 5 {
|
||||
isFirstChunk := i == 0
|
||||
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
assert.NoError(t, err)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
|
||||
// Should only receive 2 proofs (not 5 as requested)
|
||||
assert.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
})
|
||||
|
||||
localP2P.Connect(remoteP2P)
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 5, // Request 5 but only 2 available
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateExecutionProofsByRootRequest(t *testing.T) {
|
||||
t.Run("invalid - count_needed is 0", func(t *testing.T) {
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
CountNeeded: 0,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
err := validateExecutionProofsByRootRequest(req)
|
||||
require.ErrorContains(t, "count_needed must be greater than 0", err)
|
||||
})
|
||||
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
err := validateExecutionProofsByRootRequest(req)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSendExecutionProofsByRootRequest(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
|
||||
t.Run("count_needed is 0 - returns error", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot,
|
||||
CountNeeded: 0,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "count_needed must be greater than 0", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("success - receives requested proofs", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
// Create proofs to send back
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
proof2 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(2),
|
||||
ProofData: []byte("proof2"),
|
||||
}
|
||||
|
||||
// Set up the remote peer to send proofs
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
|
||||
// The request is intentionally not read or validated in this test.
|
||||
_ = ðpb.ExecutionProofsByRootRequest{}
|
||||
|
||||
// Send proof1
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
|
||||
// Send proof2
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof2))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 2,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(proofs))
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
|
||||
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("partial response - EOF before count_needed", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
// Set up the remote peer to send only 1 proof (but we request 5)
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
// Send only proof1
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 5, // Request 5 but only get 1
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(proofs)) // Only received 1
|
||||
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
|
||||
})
|
||||
|
||||
t.Run("invalid block root - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
requestedRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
wrongRoot := [32]byte{0xFF, 0xFF, 0xFF}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
// Create proof with wrong block root
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: wrongRoot[:], // Wrong root!
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: requestedRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "does not match requested root", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("already_have proof - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{1}, // Already have proof_id 1
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "received proof we already have", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
|
||||
t.Run("invalid proof_id - validation fails", func(t *testing.T) {
|
||||
localP2P := p2ptest.NewTestP2P(t)
|
||||
remoteP2P := p2ptest.NewTestP2P(t)
|
||||
localP2P.Connect(remoteP2P)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
blockRoot := [32]byte{0x01, 0x02, 0x03}
|
||||
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
|
||||
|
||||
proof1 := ðpb.ExecutionProof{
|
||||
BlockRoot: blockRoot[:],
|
||||
BlockHash: blockHash,
|
||||
Slot: primitives.Slot(10),
|
||||
ProofId: primitives.ExecutionProofId(255), // Invalid proof_id (max valid is 7)
|
||||
ProofData: []byte("proof1"),
|
||||
}
|
||||
|
||||
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
|
||||
defer func() {
|
||||
_ = stream.Close()
|
||||
}()
|
||||
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
|
||||
})
|
||||
|
||||
req := ðpb.ExecutionProofsByRootRequest{
|
||||
BlockRoot: blockRoot[:],
|
||||
CountNeeded: 1,
|
||||
AlreadyHave: []primitives.ExecutionProofId{},
|
||||
}
|
||||
|
||||
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
|
||||
require.ErrorContains(t, "invalid proof_id", err)
|
||||
require.Equal(t, 0, len(proofs))
|
||||
})
|
||||
}
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
|
||||
@@ -67,6 +68,7 @@ const (
|
||||
seenProposerSlashingSize = 100
|
||||
badBlockSize = 1000
|
||||
syncMetricsInterval = 10 * time.Second
|
||||
seenExecutionProofSize = 100
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -94,6 +96,7 @@ type config struct {
|
||||
slashingPool slashings.PoolManager
|
||||
syncCommsPool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
execProofPool execproofs.PoolManager
|
||||
chain blockchainService
|
||||
initialSync Checker
|
||||
blockNotifier blockfeed.Notifier
|
||||
@@ -235,7 +238,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
r.subHandler = newSubTopicHandler()
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
r.initCaches()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
|
||||
@@ -329,6 +329,17 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
|
||||
getSubnetsRequiringPeers: s.allDataColumnSubnets,
|
||||
})
|
||||
})
|
||||
|
||||
if features.Get().EnableZkvm {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.ExecutionProofSubnetTopicFormat,
|
||||
s.validateExecutionProof,
|
||||
s.executionProofSubscriber,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -69,12 +71,49 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// We use the service context to ensure this context is not cancelled
|
||||
// when the current function returns.
|
||||
// TODO: Do not broadcast proofs for blocks we have already seen.
|
||||
go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)
|
||||
|
||||
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "process pending atts for block")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
|
||||
const delay = 2 * time.Second
|
||||
proofTypes := flags.Get().ProofGenerationTypes
|
||||
|
||||
if len(proofTypes) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var wg errgroup.Group
|
||||
for _, proofType := range proofTypes {
|
||||
wg.Go(func() error {
|
||||
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("generate exec proof: %w", err)
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
|
||||
return fmt.Errorf("broadcast exec proof: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
|
||||
}
|
||||
}
|
||||
|
||||
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
|
||||
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {
|
||||
|
||||
31
beacon-chain/sync/subscriber_execution_proofs.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
|
||||
executionProof, ok := msg.(*ethpb.ExecutionProof)
|
||||
if !ok {
|
||||
return errors.Errorf("incorrect type of message received, wanted %T but got %T", ðpb.ExecutionProof{}, msg)
|
||||
}
|
||||
|
||||
// Insert the execution proof into the pool
|
||||
s.cfg.execProofPool.Insert(executionProof)
|
||||
|
||||
// Notify subscribers about the new execution proof
|
||||
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: opfeed.ExecutionProofReceived,
|
||||
Data: &opfeed.ExecutionProofReceivedData{
|
||||
ExecutionProof: executionProof,
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -3,11 +3,12 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -192,19 +193,13 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
|
||||
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
|
||||
|
||||
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)
|
||||
|
||||
select {
|
||||
case s.dataColumnLogCh <- dataColumnLogEntry{
|
||||
Slot: roDataColumn.Slot(),
|
||||
ColIdx: roDataColumn.Index,
|
||||
PropIdx: roDataColumn.ProposerIndex(),
|
||||
BlockRoot: roDataColumn.BlockRoot(),
|
||||
ParentRoot: roDataColumn.ParentRoot(),
|
||||
PeerSuffix: pid.String()[len(pid.String())-6:],
|
||||
PeerGossipScore: peerGossipScore,
|
||||
validationTime: validationTime,
|
||||
sinceStartTime: sinceSlotStartTime,
|
||||
slot: roDataColumn.Slot(),
|
||||
index: roDataColumn.Index,
|
||||
root: roDataColumn.BlockRoot(),
|
||||
validationTime: validationTime,
|
||||
sinceStartTime: sinceSlotStartTime,
|
||||
}:
|
||||
default:
|
||||
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
|
||||
@@ -249,68 +244,69 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
|
||||
}
|
||||
|
||||
type dataColumnLogEntry struct {
|
||||
Slot primitives.Slot
|
||||
ColIdx uint64
|
||||
PropIdx primitives.ValidatorIndex
|
||||
BlockRoot [32]byte
|
||||
ParentRoot [32]byte
|
||||
PeerSuffix string
|
||||
PeerGossipScore float64
|
||||
validationTime time.Duration
|
||||
sinceStartTime time.Duration
|
||||
slot primitives.Slot
|
||||
index uint64
|
||||
root [32]byte
|
||||
validationTime time.Duration
|
||||
sinceStartTime time.Duration
|
||||
}
|
||||
|
||||
func (s *Service) processDataColumnLogs() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
|
||||
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
|
||||
|
||||
for {
|
||||
select {
|
||||
case entry := <-s.dataColumnLogCh:
|
||||
cols := slotStats[entry.Slot]
|
||||
cols[entry.ColIdx] = entry
|
||||
slotStats[entry.Slot] = cols
|
||||
case col := <-s.dataColumnLogCh:
|
||||
cols := slotStats[col.root]
|
||||
cols = append(cols, col)
|
||||
slotStats[col.root] = cols
|
||||
case <-ticker.C:
|
||||
for slot, columns := range slotStats {
|
||||
var (
|
||||
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
|
||||
peers = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
|
||||
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
|
||||
)
|
||||
for root, columns := range slotStats {
|
||||
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
|
||||
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
|
||||
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)
|
||||
|
||||
totalReceived := 0
|
||||
for _, entry := range columns {
|
||||
if entry.PeerSuffix == "" {
|
||||
for _, column := range columns {
|
||||
indices = append(indices, column.index)
|
||||
|
||||
sumValidationTime += column.validationTime
|
||||
sumSinceStartTime += column.sinceStartTime
|
||||
|
||||
if totalReceived == 0 {
|
||||
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
|
||||
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
|
||||
totalReceived++
|
||||
continue
|
||||
}
|
||||
colIndices = append(colIndices, entry.ColIdx)
|
||||
peers = append(peers, entry.PeerSuffix)
|
||||
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
|
||||
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
|
||||
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))
|
||||
|
||||
minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
|
||||
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
|
||||
totalReceived++
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"receivedCount": totalReceived,
|
||||
"columnIndices": colIndices,
|
||||
"peers": peers,
|
||||
"gossipScores": gossipScores,
|
||||
"validationTimes": validationTimes,
|
||||
"sinceStartTimes": sinceStartTimes,
|
||||
}).Debug("Accepted data column sidecars summary")
|
||||
if totalReceived > 0 {
|
||||
slices.Sort(indices)
|
||||
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
|
||||
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": columns[0].slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"count": totalReceived,
|
||||
"indices": helpers.PrettySlice(indices),
|
||||
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
|
||||
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
|
||||
}).Debug("Accepted data column sidecars summary")
|
||||
}
|
||||
}
|
||||
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
|
||||
|
||||
slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func roundFloat(f float64, decimals int) float64 {
|
||||
mult := math.Pow(10, float64(decimals))
|
||||
return math.Round(f*mult) / mult
|
||||
func prettyMinMaxAverage(min, max, average time.Duration) string {
|
||||
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
|
||||
}
|
||||
|
||||
132
beacon-chain/sync/validate_execution_proof.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
// Always accept our own messages.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Ignore messages during initial sync.
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Reject messages with a nil topic.
|
||||
if msg.Topic == nil {
|
||||
return pubsub.ValidationReject, p2p.ErrInvalidTopic
|
||||
}
|
||||
|
||||
// Decode the message, reject if it fails.
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to decode message")
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Reject messages that are not of the expected type.
|
||||
executionProof, ok := m.(*ethpb.ExecutionProof)
|
||||
if !ok {
|
||||
log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
// 1. Verify proof is not from the future
|
||||
if err := s.proofNotFromFutureSlot(executionProof); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// 2. Verify proof slot is greater than finalized slot
|
||||
if err := s.proofAboveFinalizedSlot(ctx, executionProof); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// 3. Check if the proof is already in the DA checker cache (execution proof pool)
|
||||
// If it exists in the cache, we know it has already passed validation.
|
||||
blockRoot := bytesutil.ToBytes32(executionProof.BlockRoot)
|
||||
if s.isProofCachedInPool(blockRoot, executionProof.ProofId) {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// 4. Verify proof size limits
|
||||
if uint64(len(executionProof.ProofData)) > params.BeaconConfig().MaxProofDataBytes {
|
||||
return pubsub.ValidationReject, fmt.Errorf("execution proof data size %d exceeds maximum allowed %d", len(executionProof.ProofData), params.BeaconConfig().MaxProofDataBytes)
|
||||
}
|
||||
|
||||
// 5. Run zkVM proof verification
|
||||
if err := s.verifyExecutionProof(executionProof); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Validation successful, return accept
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// TODO: Do we need encapsulation for all those verification functions?
|
||||
|
||||
// proofNotFromFutureSlot checks whether the execution proof is from a future slot.
|
||||
func (s *Service) proofNotFromFutureSlot(executionProof *ethpb.ExecutionProof) error {
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
proofSlot := executionProof.Slot
|
||||
|
||||
if currentSlot == proofSlot {
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestStart, err := s.cfg.clock.SlotStart(proofSlot)
|
||||
if err != nil {
|
||||
// TODO: Should we penalize the peer for this?
|
||||
return fmt.Errorf("failed to compute start time for proof slot %d: %w", proofSlot, err)
|
||||
}
|
||||
|
||||
earliestStart = earliestStart.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
|
||||
// If the system time is still before earliestStart, we consider the proof from a future slot and return an error.
|
||||
if s.cfg.clock.Now().Before(earliestStart) {
|
||||
return fmt.Errorf("slot %d is too far in the future (current slot: %d)", proofSlot, currentSlot)
|
||||
}
|
||||
return nil
|
||||
}
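// Illustrative note (not part of the change): with the spec's 500ms
// MAXIMUM_GOSSIP_CLOCK_DISPARITY, a proof for slot N is accepted as soon as
// the local clock reaches slotStart(N) - 500ms, even if the node's current
// slot is still N-1; anything earlier than that is treated as a future-slot
// proof and rejected.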
|
||||
|
||||
// proofAboveFinalizedSlot checks whether the execution proof's slot is after the finalized slot.
|
||||
func (s *Service) proofAboveFinalizedSlot(ctx context.Context, executionProof *ethpb.ExecutionProof) error {
|
||||
finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
// TODO: Should we penalize the peer for this?
|
||||
return fmt.Errorf("failed to get finalized checkpoint: %w", err)
|
||||
}
|
||||
|
||||
fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
// TODO: Should we penalize the peer for this?
|
||||
return fmt.Errorf("failed to compute start slot for finalized epoch %d: %w", finalizedCheckpoint.Epoch, err)
|
||||
}
|
||||
|
||||
if executionProof.Slot <= fSlot {
|
||||
return fmt.Errorf("execution proof slot %d is not after finalized slot %d", executionProof.Slot, fSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// isProofCachedInPool checks if the execution proof is already present in the pool.
|
||||
func (s *Service) isProofCachedInPool(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
|
||||
return s.cfg.execProofPool.Exists(blockRoot, proofId)
|
||||
}
|
||||
|
||||
// verifyExecutionProof performs the actual verification of the execution proof.
|
||||
func (s *Service) verifyExecutionProof(_ *ethpb.ExecutionProof) error {
|
||||
// For now, treat all proofs as valid.
|
||||
return nil
|
||||
}
|
||||
408
beacon-chain/sync/validate_execution_proof_test.go
Normal file
@@ -0,0 +1,408 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
testingdb "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestValidateExecutionProof(t *testing.T) {
|
||||
beaconDB := testingdb.SetupDB(t)
|
||||
p2pService := mockp2p.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
fcp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
}
|
||||
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Root: params.BeaconConfig().ZeroHash[:],
|
||||
Slot: 0,
|
||||
}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, fcp))
|
||||
|
||||
defaultTopic := p2p.ExecutionProofSubnetTopicFormat + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E}
|
||||
|
||||
chainService := &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
FinalizedCheckPoint: fcp,
|
||||
}
|
||||
|
||||
currentSlot := primitives.Slot(100)
|
||||
genesisTime := time.Now().Add(-time.Duration(uint64(currentSlot)*params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setupService func() *Service
|
||||
proof *ethpb.ExecutionProof
|
||||
topic *string
|
||||
pid peer.ID
|
||||
want pubsub.ValidationResult
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Ignore when syncing",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: true},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationIgnore,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Reject nil topic",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: nil,
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Reject proof from future slot",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot + 1000, // Far future slot
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Reject proof below finalized slot",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: primitives.Slot(5), // Before finalized epoch 1
|
||||
ProofId: 1,
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Ignore already seen proof",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationIgnore,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Ignore proof already in pool",
|
||||
setupService: func() *Service {
|
||||
pool := execproofs.NewPool()
|
||||
pool.Insert(ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
})
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: pool,
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationIgnore,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Reject proof if no verifier found",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Reject proof if verification fails",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationReject,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Accept valid proof",
|
||||
setupService: func() *Service {
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
|
||||
execProofPool: execproofs.NewPool(),
|
||||
},
|
||||
}
|
||||
s.initCaches()
|
||||
return s
|
||||
},
|
||||
proof: ðpb.ExecutionProof{
|
||||
Slot: currentSlot,
|
||||
ProofId: primitives.ExecutionProofId(1),
|
||||
BlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ProofData: make([]byte, 100),
|
||||
},
|
||||
topic: func() *string {
|
||||
t := fmt.Sprintf(defaultTopic, fakeDigest)
|
||||
return &t
|
||||
}(),
|
||||
pid: "random-peer",
|
||||
want: pubsub.ValidationAccept,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.setupService()
|
||||
|
||||
// Create pubsub message
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p2pService.Encoding().EncodeGossip(buf, tt.proof)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: tt.topic,
|
||||
},
|
||||
}
|
||||
|
||||
// Validate
|
||||
result, err := s.validateExecutionProof(ctx, tt.pid, msg)
|
||||
|
||||
if tt.wantErr {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.want, result)
|
||||
|
||||
// If validation accepted, check that ValidatorData is set
|
||||
if result == pubsub.ValidationAccept {
|
||||
assert.NotNil(t, msg.ValidatorData)
|
||||
validatedProof, ok := msg.ValidatorData.(*ethpb.ExecutionProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Check that the validated proof matches the original
|
||||
assert.Equal(t, tt.proof.ProofId, validatedProof.ProofId)
|
||||
assert.Equal(t, tt.proof.Slot, validatedProof.Slot)
|
||||
assert.DeepEqual(t, tt.proof.BlockRoot, validatedProof.BlockRoot)
|
||||
assert.DeepEqual(t, tt.proof.BlockHash, validatedProof.BlockHash)
|
||||
assert.DeepEqual(t, tt.proof.ProofData, validatedProof.ProofData)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type alwaysFailVerifier struct{}
|
||||
|
||||
func (v *alwaysFailVerifier) Verify(proof *ethpb.ExecutionProof) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (v *alwaysFailVerifier) GetProofId() primitives.ExecutionProofId {
|
||||
return primitives.ExecutionProofId(1)
|
||||
}
|
||||
@@ -687,6 +687,12 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
|
||||
}}
|
||||
}
|
||||
|
||||
func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
|
||||
return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
|
||||
return st, nil
|
||||
}}
|
||||
}
|
||||
|
||||
func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
|
||||
return sbrForValOverrideWithT(nil, idx, val)
|
||||
}
|
||||
|
||||
@@ -11,12 +11,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/logging"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
@@ -484,88 +482,19 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e
|
||||
|
||||
defer dv.recordResult(RequireSidecarProposerExpected, &err)
|
||||
|
||||
type slotParentRoot struct {
|
||||
slot primitives.Slot
|
||||
parentRoot [fieldparams.RootLength]byte
|
||||
}
|
||||
|
||||
targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)
|
||||
|
||||
var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
|
||||
// Use cached values if available.
|
||||
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
|
||||
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Compute the epoch of the data column slot.
|
||||
dataColumnEpoch := slots.ToEpoch(slot)
|
||||
if dataColumnEpoch > 0 {
|
||||
dataColumnEpoch = dataColumnEpoch - 1
|
||||
}
|
||||
|
||||
// Compute the target root for the epoch.
|
||||
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
|
||||
if err != nil {
|
||||
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
|
||||
}
|
||||
|
||||
// Store the target root in the cache.
|
||||
targetRootBySlotParentRoot[slotParentRoot] = targetRoot
|
||||
|
||||
return targetRoot, nil
|
||||
}
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the slot of the data column.
|
||||
dataColumnSlot := dataColumn.Slot()
|
||||
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
// Compute the target root for the data column.
|
||||
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
|
||||
// Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
|
||||
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "target root"))
|
||||
return columnErrBuilder(errors.Wrap(err, "verifying state"))
|
||||
}
|
||||
|
||||
// Compute the epoch of the data column slot.
|
||||
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
|
||||
if dataColumnEpoch > 0 {
|
||||
dataColumnEpoch = dataColumnEpoch - 1
|
||||
}
|
||||
|
||||
// Create a checkpoint for the target root.
|
||||
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}
|
||||
|
||||
// Try to extract the proposer index from the data column in the cache.
|
||||
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)
|
||||
|
||||
if !cached {
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
// Ensure the expensive index computation is only performed once for
|
||||
// concurrent requests for the same signature data.
|
||||
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
|
||||
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
|
||||
if err != nil {
|
||||
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
|
||||
}
|
||||
|
||||
idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
|
||||
if err != nil {
|
||||
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
|
||||
}
|
||||
|
||||
return idx, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
|
||||
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
|
||||
}
|
||||
// Use proposer lookahead directly
|
||||
idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
|
||||
}
|
||||
|
||||
if idx != dataColumn.ProposerIndex() {
|
||||
@@ -626,7 +555,3 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {
|
||||
|
||||
return sha256.Sum256(unhashedKey), nil
|
||||
}
|
||||
|
||||
func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
|
||||
return string(root[:]) + fmt.Sprintf("%d", slot)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package verification
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -795,87 +794,90 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
firstColumn := columns[0]
|
||||
ctx := t.Context()
|
||||
testCases := []struct {
|
||||
name string
|
||||
stateByRooter StateByRooter
|
||||
proposerCache proposerCache
|
||||
columns []blocks.RODataColumn
|
||||
error string
|
||||
}{
|
||||
{
|
||||
name: "Cached, matches",
|
||||
stateByRooter: nil,
|
||||
proposerCache: &mockProposerCache{
|
||||
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
|
||||
},
|
||||
columns: columns,
|
||||
},
|
||||
{
|
||||
name: "Cached, does not match",
|
||||
stateByRooter: nil,
|
||||
proposerCache: &mockProposerCache{
|
||||
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
|
||||
},
|
||||
columns: columns,
|
||||
error: errSidecarUnexpectedProposer.Error(),
|
||||
},
|
||||
{
|
||||
name: "Not cached, state lookup failure",
|
||||
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
|
||||
proposerCache: &mockProposerCache{
|
||||
ProposerCB: pcReturnsNotFound(),
|
||||
},
|
||||
columns: columns,
|
||||
error: "verifying state",
|
||||
},
|
||||
}
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: tc.stateByRooter,
|
||||
pc: tc.proposerCache,
|
||||
hsp: &mockHeadStateProvider{},
|
||||
fc: &mockForkchoicer{
|
||||
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
|
||||
},
|
||||
// Create a Fulu state to get the expected proposer from the lookahead.
|
||||
fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
|
||||
expectedProposer, err := fuluState.ProposerLookahead()
|
||||
require.NoError(t, err)
|
||||
expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])
|
||||
|
||||
// Generate data columns with the expected proposer index.
|
||||
matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
|
||||
// Generate data columns with wrong proposer index.
|
||||
wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)
|
||||
|
||||
t.Run("Proposer matches", func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: sbrReturnsState(fuluState),
|
||||
hsp: &mockHeadStateProvider{
|
||||
headRoot: parentRoot[:],
|
||||
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
|
||||
headStateReadOnly: fuluState,
|
||||
},
|
||||
}
|
||||
fc: &mockForkchoicer{},
|
||||
},
|
||||
}
|
||||
|
||||
verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
|
||||
var wg sync.WaitGroup
|
||||
verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
|
||||
var err1, err2 error
|
||||
wg.Go(func() {
|
||||
err1 = verifier.SidecarProposerExpected(ctx)
|
||||
})
|
||||
wg.Go(func() {
|
||||
err2 = verifier.SidecarProposerExpected(ctx)
|
||||
})
|
||||
wg.Wait()
|
||||
t.Run("Proposer does not match", func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: sbrReturnsState(fuluState),
|
||||
hsp: &mockHeadStateProvider{
|
||||
headRoot: parentRoot[:],
|
||||
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
|
||||
headStateReadOnly: fuluState,
|
||||
},
|
||||
fc: &mockForkchoicer{},
|
||||
},
|
||||
}
|
||||
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
|
||||
if len(tc.error) > 0 {
|
||||
require.ErrorContains(t, tc.error, err1)
|
||||
require.ErrorContains(t, tc.error, err2)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
return
|
||||
}
|
||||
t.Run("State lookup failure", func(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sr: sbrNotFound(t, columns[0].ParentRoot()),
|
||||
hsp: &mockHeadStateProvider{},
|
||||
fc: &mockForkchoicer{},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, err1)
|
||||
require.NoError(t, err2)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.ErrorContains(t, "verifying state", err)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
}
|
||||
|
||||
err := verifier.SidecarProposerExpected(ctx)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
|
||||
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
|
||||
blobs := make([]kzg.Blob, 0, len(roBlobs))
|
||||
for i := range roBlobs {
|
||||
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
|
||||
}
|
||||
|
||||
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
return roDataColumnSidecars
|
||||
}
|
||||
|
||||
func TestColumnRequirementSatisfaction(t *testing.T) {
|
||||
@@ -922,12 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestConcatRootSlot(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{1, 2, 3}
|
||||
const slot = primitives.Slot(3210)
|
||||
|
||||
const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"
|
||||
|
||||
actual := concatRootSlot(root, slot)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
2
changelog/kasey_fix-backfill-flag.md
Normal file
@@ -0,0 +1,2 @@
|
||||
#### Fixed
|
||||
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.
|
||||
3
changelog/manu-cache-warmup.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Data column sidecars cache warmup: process all sidecars for a given epoch in parallel.
|
||||
3
changelog/manu-log.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Summarize the DEBUG log for data column sidecars received via gossip.
|
||||
2
changelog/potuz_dcs_pc_removal.md
Normal file
@@ -0,0 +1,2 @@
|
||||
### Changed
|
||||
- Use lookahead to validate data column sidecar proposer index.
|
||||
3
changelog/potuz_dont_lock_fcu.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Notify the engine about forkchoice updates in the background.
|
||||
2
changelog/potuz_fcu_ctx.md
Normal file
@@ -0,0 +1,2 @@
|
||||
### Changed
|
||||
- Use a separate context when updating the slot cache.
|
||||
2
changelog/potuz_no_fcu_on_batches.md
Normal file
@@ -0,0 +1,2 @@
|
||||
### Ignored
|
||||
- Do not send FCU on block batches.
|
||||
3
changelog/potuz_remove_signature_check.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Changed
|
||||
|
||||
- Do not check block signature on state transition.
|
||||
3
changelog/potuz_use_head_previous_epoch.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint.
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//cmd:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
|
||||
@@ -356,4 +356,12 @@ var (
|
||||
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
|
||||
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
|
||||
}
|
||||
// ZKVM Generation Proof Type
|
||||
ZkvmGenerationProofTypeFlag = &cli.IntSliceFlag{
|
||||
Name: "zkvm-generation-proof-types",
|
||||
Usage: `
|
||||
Comma-separated list of proof type IDs to generate
|
||||
(e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth).
|
||||
Optional - nodes can verify proofs without generating them.`,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/cmd"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
@@ -27,6 +28,7 @@ type GlobalFlags struct {
|
||||
DataColumnBatchLimit int
|
||||
DataColumnBatchLimitBurstFactor int
|
||||
StateDiffExponents []int
|
||||
ProofGenerationTypes []primitives.ExecutionProofId
|
||||
}
|
||||
|
||||
var globalConfig *GlobalFlags
|
||||
@@ -84,6 +86,19 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
// zkVM Proof Generation Types
|
||||
proofTypes := make([]primitives.ExecutionProofId, 0, len(ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name)))
|
||||
for _, t := range ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name) {
|
||||
proofTypes = append(proofTypes, primitives.ExecutionProofId(t))
|
||||
}
|
||||
cfg.ProofGenerationTypes = proofTypes
|
||||
|
||||
if features.Get().EnableZkvm {
|
||||
if err := validateZkvmProofGenerationTypes(cfg.ProofGenerationTypes); err != nil {
|
||||
return fmt.Errorf("validate Zkvm proof generation types: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
|
||||
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
|
||||
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
|
||||
@@ -135,3 +150,13 @@ func validateStateDiffExponents(exponents []int) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateZkvmProofGenerationTypes validates the provided proof IDs.
|
||||
func validateZkvmProofGenerationTypes(types []primitives.ExecutionProofId) error {
|
||||
for _, t := range types {
|
||||
if t >= primitives.EXECUTION_PROOF_TYPE_COUNT {
|
||||
return fmt.Errorf("invalid zkvm proof generation type: %d; valid types are between 0 and %d", t, primitives.EXECUTION_PROOF_TYPE_COUNT-1)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
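// Example (values are illustrative): running the beacon node with
// --zkvm-generation-proof-types=0,1 yields ProofGenerationTypes = [0, 1],
// which passes this check; any id >= EXECUTION_PROOF_TYPE_COUNT (currently 8)
// causes ConfigureGlobalFlags to return an error.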
|
||||
|
||||
@@ -156,6 +156,7 @@ var appFlags = []cli.Flag{
|
||||
dasFlags.BackfillOldestSlot,
|
||||
dasFlags.BlobRetentionEpochFlag,
|
||||
flags.BatchVerifierLimit,
|
||||
flags.ZkvmGenerationProofTypeFlag,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -231,6 +231,12 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.SetGCPercent,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "zkvm",
|
||||
Flags: []cli.Flag{
|
||||
flags.ZkvmGenerationProofTypeFlag,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -52,6 +52,7 @@ type Flags struct {
|
||||
DisableDutiesV2 bool // DisableDutiesV2 sets validator client to use the get Duties endpoint
|
||||
EnableWeb bool // EnableWeb enables the webui on the validator client
|
||||
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
|
||||
EnableZkvm bool // EnableZkvm enables zkVM related features.
|
||||
|
||||
// Logging related toggles.
|
||||
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
|
||||
@@ -298,6 +299,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.IsSet(EnableZkvmFlag.Name) {
|
||||
logEnabled(EnableZkvmFlag)
|
||||
cfg.EnableZkvm = true
|
||||
}
|
||||
|
||||
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
|
||||
Init(cfg)
|
||||
return nil
|
||||
|
||||
@@ -211,6 +211,17 @@ var (
|
||||
Name: "ignore-unviable-attestations",
|
||||
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
|
||||
}
|
||||
// Activate ZKVM execution proof mode
|
||||
EnableZkvmFlag = &cli.BoolFlag{
|
||||
Name: "activate-zkvm",
|
||||
Usage: `
|
||||
Activates ZKVM execution proof mode. Enables the node to subscribe to the
|
||||
execution_proof gossip topic, receive and verify execution proofs from peers,
|
||||
and advertise zkVM support in its ENR for peer discovery.
|
||||
Use --zkvm-generation-proof-types to specify which proof types this node
|
||||
should generate (optional - nodes can verify without generating).
|
||||
`,
|
||||
}
|
||||
)
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
@@ -272,6 +283,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
|
||||
enableExperimentalAttestationPool,
|
||||
forceHeadFlag,
|
||||
blacklistRoots,
|
||||
EnableZkvmFlag,
|
||||
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
|
||||
|
||||
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {
|
||||
|
||||
@@ -310,6 +310,11 @@ type BeaconChainConfig struct {
|
||||
// Blobs Values
|
||||
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`
|
||||
|
||||
// EIP-8025: Optional Execution Proofs
|
||||
MaxProofDataBytes uint64 `yaml:"MAX_PROOF_DATA_BYTES" spec:"true"` // MaxProofDataBytes is the maximum number of bytes for execution proof data.
|
||||
MinProofsRequired uint64 `yaml:"MIN_PROOFS_REQUIRED" spec:"true"` // MinProofsRequired is the minimum number of execution proofs required for a block to be considered valid.
|
||||
MinEpochsForExecutionProofRequests uint64 `yaml:"MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS" spec:"true"` // MinEpochsForExecutionProofRequests is the minimum number of epochs the node will keep the execution proofs for.
|
||||
|
||||
// Deprecated_MaxBlobsPerBlock defines the max blobs that could exist in a block.
|
||||
// Deprecated: This field is no longer supported. Avoid using it.
|
||||
DeprecatedMaxBlobsPerBlock int `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"`
|
||||
@@ -732,6 +737,20 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
|
||||
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
|
||||
}
|
||||
|
||||
// WithinExecutionProofPeriod checks if the given epoch is within the execution proof retention period.
|
||||
// This is used to determine whether execution proofs should be requested or generated for blocks at the given epoch.
|
||||
// Returns true if the epoch is at or after the retention boundary (Fulu fork epoch or proof retention epoch).
|
||||
func WithinExecutionProofPeriod(epoch, current primitives.Epoch) bool {
|
||||
proofRetentionEpoch := primitives.Epoch(0)
|
||||
if current >= primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests) {
|
||||
proofRetentionEpoch = current - primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests)
|
||||
}
|
||||
|
||||
boundaryEpoch := primitives.MaxEpoch(BeaconConfig().FuluForkEpoch, proofRetentionEpoch)
|
||||
|
||||
return epoch >= boundaryEpoch
|
||||
}
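// Worked example (hypothetical epochs, using the mainnet value
// MinEpochsForExecutionProofRequests = 2): at current epoch 150 the retention
// epoch is 150 - 2 = 148; with a Fulu fork epoch of, say, 100,
// boundaryEpoch = max(100, 148) = 148, so only blocks at epoch 148 or later
// are within the execution proof period.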
|
||||
|
||||
// EpochsDuration returns the time duration of the given number of epochs.
|
||||
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
|
||||
return SlotsDuration(SlotsForEpochs(count, b), b)
|
||||
|
||||
@@ -38,6 +38,7 @@ var mainnetNetworkConfig = &NetworkConfig{
|
||||
AttSubnetKey: "attnets",
|
||||
SyncCommsSubnetKey: "syncnets",
|
||||
CustodyGroupCountKey: "cgc",
|
||||
ZkvmEnabledKey: "zkvm",
|
||||
MinimumPeersInSubnetSearch: 20,
|
||||
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
|
||||
BootstrapNodes: []string{
|
||||
@@ -355,6 +356,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
MaxBlobsPerBlock: 21,
|
||||
},
|
||||
},
|
||||
|
||||
// EIP-8025: Optional Execution Proofs
|
||||
MaxProofDataBytes: 1_048_576, // 1 MiB
|
||||
MinProofsRequired: 2,
|
||||
MinEpochsForExecutionProofRequests: 2,
|
||||
}
|
||||
|
||||
// MainnetTestConfig provides a version of the mainnet config that has a different name
|
||||
|
||||
@@ -11,6 +11,7 @@ type NetworkConfig struct {
|
||||
AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield.
|
||||
SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield.
|
||||
CustodyGroupCountKey string // CustodyGroupCountKey is the ENR key of the custody group count.
|
||||
ZkvmEnabledKey string // ZkvmEnabledKey is the ENR key of whether zkVM mode is enabled or not.
|
||||
MinimumPeersInSubnetSearch uint64 // MinimumPeersInSubnetSearch is the required number of peers that we need to be able to look up in a subnet search.
|
||||
|
||||
// Chain Network Config
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
"domain.go",
|
||||
"epoch.go",
|
||||
"execution_address.go",
|
||||
"execution_proof_id.go",
|
||||
"kzg.go",
|
||||
"payload_id.go",
|
||||
"slot.go",
|
||||
@@ -36,6 +37,7 @@ go_test(
|
||||
"committee_index_test.go",
|
||||
"domain_test.go",
|
||||
"epoch_test.go",
|
||||
"execution_proof_id_test.go",
|
||||
"slot_test.go",
|
||||
"sszbytes_test.go",
|
||||
"sszuint64_test.go",
|
||||
|
||||
64
consensus-types/primitives/execution_proof_id.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package primitives
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
fssz "github.com/prysmaticlabs/fastssz"
|
||||
)
|
||||
|
||||
var _ fssz.HashRoot = (ExecutionProofId)(0)
|
||||
var _ fssz.Marshaler = (*ExecutionProofId)(nil)
|
||||
var _ fssz.Unmarshaler = (*ExecutionProofId)(nil)
|
||||
|
||||
// Number of execution proof types.
// Each proof type represents a different zkVM+EL combination.
|
||||
//
|
||||
// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
|
||||
const EXECUTION_PROOF_TYPE_COUNT = 8
|
||||
|
||||
// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
|
||||
type ExecutionProofId uint8
|
||||
|
||||
func (id *ExecutionProofId) IsValid() bool {
|
||||
return uint8(*id) < EXECUTION_PROOF_TYPE_COUNT
|
||||
}
|
||||
|
||||
// HashTreeRoot --
|
||||
func (id ExecutionProofId) HashTreeRoot() ([32]byte, error) {
|
||||
return fssz.HashWithDefaultHasher(id)
|
||||
}
|
||||
|
||||
// HashTreeRootWith --
|
||||
func (id ExecutionProofId) HashTreeRootWith(hh *fssz.Hasher) error {
|
||||
hh.PutUint8(uint8(id))
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalSSZ --
|
||||
func (id *ExecutionProofId) UnmarshalSSZ(buf []byte) error {
|
||||
if len(buf) != id.SizeSSZ() {
|
||||
return fmt.Errorf("expected buffer of length %d received %d", id.SizeSSZ(), len(buf))
|
||||
}
|
||||
*id = ExecutionProofId(fssz.UnmarshallUint8(buf))
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalSSZTo --
|
||||
func (id *ExecutionProofId) MarshalSSZTo(buf []byte) ([]byte, error) {
|
||||
marshalled, err := id.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(buf, marshalled...), nil
|
||||
}
|
||||
|
||||
// MarshalSSZ --
|
||||
func (id *ExecutionProofId) MarshalSSZ() ([]byte, error) {
|
||||
marshalled := fssz.MarshalUint8([]byte{}, uint8(*id))
|
||||
return marshalled, nil
|
||||
}
|
||||
|
||||
// SizeSSZ --
|
||||
func (id *ExecutionProofId) SizeSSZ() int {
|
||||
return 1
|
||||
}
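// Minimal usage sketch (illustrative only): a proof id round-trips through its
// one-byte SSZ encoding.
//
//	id := ExecutionProofId(3)
//	enc, _ := id.MarshalSSZ()     // enc == []byte{0x03}
//	var decoded ExecutionProofId
//	_ = decoded.UnmarshalSSZ(enc) // decoded == 3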
|
||||
73
consensus-types/primitives/execution_proof_id_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package primitives_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
)
|
||||
|
||||
func TestExecutionProofId_IsValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
id primitives.ExecutionProofId
|
||||
valid bool
|
||||
}{
|
||||
{
|
||||
name: "valid proof id 0",
|
||||
id: 0,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "valid proof id 1",
|
||||
id: 1,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "valid proof id 7 (max valid)",
|
||||
id: 7,
|
||||
valid: true,
|
||||
},
|
||||
{
|
||||
name: "invalid proof id 8 (at limit)",
|
||||
id: 8,
|
||||
valid: false,
|
||||
},
|
||||
{
|
||||
name: "invalid proof id 255",
|
||||
id: 255,
|
||||
valid: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := tt.id.IsValid(); got != tt.valid {
|
||||
t.Errorf("ExecutionProofId.IsValid() = %v, want %v", got, tt.valid)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutionProofId_Casting(t *testing.T) {
|
||||
id := primitives.ExecutionProofId(5)
|
||||
|
||||
t.Run("uint8", func(t *testing.T) {
|
||||
if uint8(id) != 5 {
|
||||
t.Errorf("Casting to uint8 failed: got %v, want 5", uint8(id))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("from uint8", func(t *testing.T) {
|
||||
var x uint8 = 7
|
||||
if primitives.ExecutionProofId(x) != 7 {
|
||||
t.Errorf("Casting from uint8 failed: got %v, want 7", primitives.ExecutionProofId(x))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("int", func(t *testing.T) {
|
||||
var x = 3
|
||||
if primitives.ExecutionProofId(x) != 3 {
|
||||
t.Errorf("Casting from int failed: got %v, want 3", primitives.ExecutionProofId(x))
|
||||
}
|
||||
})
|
||||
}
|
||||
72
kurtosis/README.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# Kurtosis scripts for EIP-8025
|
||||
|
||||
## How to run
|
||||
|
||||
I slightly modified [Manu's tip](https://hackmd.io/8z4thpsyQJioaU6jj0Wazw) by adding the following functions to my `~/.zshrc`.
|
||||
|
||||
```zsh
|
||||
# Kurtosis Aliases
|
||||
blog() {
|
||||
docker logs -f "$(docker ps | grep cl-"$1"-prysm-geth | awk '{print $NF}')" 2>&1
|
||||
}
|
||||
|
||||
vlog() {
|
||||
docker logs -f "$(docker ps | grep vc-"$1"-geth-prysm | awk '{print $NF}')" 2>&1
|
||||
}
|
||||
|
||||
dora() {
|
||||
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/dora/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
|
||||
}
|
||||
|
||||
graf() {
|
||||
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/grafana/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
|
||||
}
|
||||
|
||||
devnet () {
|
||||
local args_file_path="./kurtosis/default.yaml"
|
||||
if [ ! -z "$1" ]; then
|
||||
args_file_path="$1"
|
||||
echo "Using custom args-file path: $args_file_path"
|
||||
else
|
||||
echo "Using default args-file path: $args_file_path"
|
||||
fi
|
||||
|
||||
kurtosis clean -a &&
|
||||
bazel build //cmd/beacon-chain:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
|
||||
docker load -i bazel-bin/cmd/beacon-chain/oci_image_tarball/tarball.tar &&
|
||||
docker tag gcr.io/offchainlabs/prysm/beacon-chain prysm-bn-custom-image &&
|
||||
bazel build //cmd/validator:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
|
||||
docker load -i bazel-bin/cmd/validator/oci_image_tarball/tarball.tar &&
|
||||
docker tag gcr.io/offchainlabs/prysm/validator prysm-vc-custom-image &&
|
||||
kurtosis run github.com/ethpandaops/ethereum-package --args-file="$args_file_path" --verbosity brief &&
|
||||
dora
|
||||
}
|
||||
|
||||
stop() {
|
||||
kurtosis clean -a
|
||||
}
|
||||
|
||||
dps() {
|
||||
docker ps --format "table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}" -a
|
||||
}
|
||||
```
|
||||
|
||||
At the project directory, you can simply spin up a devnet with:
|
||||
|
||||
```bash
|
||||
$ devnet
|
||||
```
|
||||
|
||||
Or you can specify the network parameter YAML file like:
|
||||
|
||||
```bash
|
||||
$ devnet ./kurtosis/proof_verify.yaml
|
||||
```
|
||||
|
||||
### Running scripts with local images
|
||||
|
||||
Images from Prysm can be automatically loaded from `devnet` command, but if you want to run a script with `lighthouse`:
|
||||
|
||||
#### `./kurtosis/interop.yaml`
|
||||
|
||||
- `lighthouse:local`: Please build your own image following [Lighthouse's guide](https://lighthouse-book.sigmaprime.io/installation_docker.html?highlight=docker#building-the-docker-image) on [`kevaundray/kw/sel-alternative`](https://github.com/kevaundray/lighthouse/tree/kw/sel-alternative/) branch.
kurtosis/default.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
participants:
  - el_type: geth
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
    vc_image: prysm-vc-custom-image
    count: 4
network_params:
  seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
  - dora
  - prometheus_grafana
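This is the args file the README's `devnet` helper picks up when no argument is given; with those aliases loaded, you can also target it explicitly with `devnet ./kurtosis/default.yaml`.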
kurtosis/interop.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
# 3 nodes (2 from Prysm, 1 from Lighthouse) generate proofs and
# 1 node only verifies
participants:
  # Prysm: Proof generating nodes (nodes 1-2)
  - el_type: geth
    el_image: ethereum/client-go:latest
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
    vc_image: prysm-vc-custom-image
    count: 2
  # Lighthouse: Proof generating nodes (node 3)
  - el_type: geth
    el_image: ethereum/client-go:latest
    cl_type: lighthouse
    cl_image: lighthouse:local
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
      - --target-peers=3
    count: 1
  # Prysm: Proof verifying only node (node 4)
  - el_type: dummy
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
    vc_image: prysm-vc-custom-image
    count: 1
network_params:
  seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
  - dora
  - prometheus_grafana
kurtosis/proof_verify.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
# 3 nodes generate proofs, 1 node only verifies
participants:
  # Proof generating nodes (nodes 1-3)
  - el_type: geth
    el_image: ethereum/client-go:latest
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
      - --zkvm-generation-proof-types=0,1
    vc_image: prysm-vc-custom-image
    count: 3
  # Proof verifying only node (node 4)
  - el_type: dummy
    cl_type: prysm
    cl_image: prysm-bn-custom-image
    cl_extra_params:
      - --activate-zkvm
    vc_image: prysm-vc-custom-image
    count: 1
network_params:
  seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
  - dora
  - prometheus_grafana
@@ -371,6 +371,11 @@ go_library(
        "beacon_block.go",
        "cloners.go",
        "eip_7521.go",
        "execution_proof.go",
        # NOTE: ExecutionProof includes an alias type of uint8,
        # which is not supported by fastssz sszgen.
        # Temporarily managed manually.
        "execution_proof.ssz.go",
        "gloas.go",
        "log.go",
        "sync_committee_mainnet.go",
@@ -427,6 +432,7 @@ ssz_proto_files(
        "beacon_state.proto",
        "blobs.proto",
        "data_columns.proto",
        "execution_proof.proto",
        "gloas.proto",
        "light_client.proto",
        "sync_committee.proto",
proto/prysm/v1alpha1/execution_proof.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package eth

import "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"

// Copy --
func (e *ExecutionProof) Copy() *ExecutionProof {
    if e == nil {
        return nil
    }

    return &ExecutionProof{
        ProofId:   e.ProofId,
        Slot:      e.Slot,
        BlockHash: bytesutil.SafeCopyBytes(e.BlockHash),
        BlockRoot: bytesutil.SafeCopyBytes(e.BlockRoot),
        ProofData: bytesutil.SafeCopyBytes(e.ProofData),
    }
}
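Because `SafeCopyBytes` duplicates each byte slice, the copy shares no backing arrays with the original. A minimal illustration (not part of the diff), assuming the generated `ExecutionProof` type from this package:

```go
package main

import (
	"fmt"

	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	orig := &eth.ExecutionProof{
		ProofId:   1,
		Slot:      42,
		BlockHash: make([]byte, 32),
		BlockRoot: make([]byte, 32),
		ProofData: []byte{0xaa, 0xbb},
	}

	// Mutating the copy's ProofData must not touch the original slice.
	cp := orig.Copy()
	cp.ProofData[0] = 0xff
	fmt.Println(orig.ProofData[0] == 0xaa) // true
}
```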
proto/prysm/v1alpha1/execution_proof.pb.go (new generated file, executable, 268 lines)
@@ -0,0 +1,268 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.3
|
||||
// protoc v3.21.7
|
||||
// source: proto/prysm/v1alpha1/execution_proof.proto
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type ExecutionProof struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
ProofId github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,1,opt,name=proof_id,json=proofId,proto3" json:"proof_id,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"`
|
||||
Slot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
|
||||
BlockHash []byte `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty" ssz-size:"32"`
|
||||
BlockRoot []byte `protobuf:"bytes,4,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
|
||||
ProofData []byte `protobuf:"bytes,5,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"1048576"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) Reset() {
|
||||
*x = ExecutionProof{}
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExecutionProof) ProtoMessage() {}
|
||||
|
||||
func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExecutionProof.ProtoReflect.Descriptor instead.
|
||||
func (*ExecutionProof) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetProofId() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
|
||||
if x != nil {
|
||||
return x.ProofId
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(0)
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
|
||||
if x != nil {
|
||||
return x.Slot
|
||||
}
|
||||
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(0)
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetBlockHash() []byte {
|
||||
if x != nil {
|
||||
return x.BlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionProof) GetProofData() []byte {
|
||||
if x != nil {
|
||||
return x.ProofData
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExecutionProofsByRootRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
|
||||
CountNeeded uint64 `protobuf:"varint,2,opt,name=count_needed,json=countNeeded,proto3" json:"count_needed,omitempty"`
|
||||
AlreadyHave []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,3,rep,packed,name=already_have,json=alreadyHave,proto3" json:"already_have,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId" ssz-max:"8"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) Reset() {
|
||||
*x = ExecutionProofsByRootRequest{}
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExecutionProofsByRootRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExecutionProofsByRootRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExecutionProofsByRootRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
|
||||
if x != nil {
|
||||
return x.BlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) GetCountNeeded() uint64 {
|
||||
if x != nil {
|
||||
return x.CountNeeded
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ExecutionProofsByRootRequest) GetAlreadyHave() []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
|
||||
if x != nil {
|
||||
return x.AlreadyHave
|
||||
}
|
||||
return []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(nil)
|
||||
}
|
||||
|
||||
var File_proto_prysm_v1alpha1_execution_proof_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
|
||||
0x0a, 0x2a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
|
||||
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
|
||||
0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65,
|
||||
0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
|
||||
0x6f, 0x6f, 0x66, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x50, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64,
|
||||
0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44,
|
||||
0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
|
||||
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
|
||||
0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
|
||||
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
|
||||
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
|
||||
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73,
|
||||
0x68, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f,
|
||||
0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0x92, 0xb5,
|
||||
0x18, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66,
|
||||
0x44, 0x61, 0x74, 0x61, 0x22, 0xe2, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72,
|
||||
0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
|
||||
0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x0c,
|
||||
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12,
|
||||
0x78, 0x0a, 0x0c, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x68, 0x61, 0x76, 0x65, 0x18,
|
||||
0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x55, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
|
||||
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x92, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x0b, 0x61, 0x6c,
|
||||
0x72, 0x65, 0x61, 0x64, 0x79, 0x48, 0x61, 0x76, 0x65, 0x42, 0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72,
|
||||
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
|
||||
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
|
||||
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
|
||||
0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
|
||||
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
|
||||
0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68,
|
||||
0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce sync.Once
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP() []byte {
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce.Do(func() {
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData)
|
||||
})
|
||||
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = []any{
|
||||
(*ExecutionProof)(nil), // 0: ethereum.eth.v1alpha1.ExecutionProof
|
||||
(*ExecutionProofsByRootRequest)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
|
||||
}
|
||||
var file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_prysm_v1alpha1_execution_proof_proto_init() }
|
||||
func file_proto_prysm_v1alpha1_execution_proof_proto_init() {
|
||||
if File_proto_prysm_v1alpha1_execution_proof_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_proto_prysm_v1alpha1_execution_proof_proto_goTypes,
|
||||
DependencyIndexes: file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs,
|
||||
MessageInfos: file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes,
|
||||
}.Build()
|
||||
File_proto_prysm_v1alpha1_execution_proof_proto = out.File
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = nil
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = nil
|
||||
file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = nil
|
||||
}
proto/prysm/v1alpha1/execution_proof.proto (new file, 52 lines)
@@ -0,0 +1,52 @@
syntax = "proto3";

package ethereum.eth.v1alpha1;

import "proto/eth/ext/options.proto";

option csharp_namespace = "Ethereum.Eth.v1alpha1";
option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
option java_multiple_files = true;
option java_outer_classname = "ExecutionProofProto";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";

message ExecutionProof {
  // Which proof type (zkVM+EL combination) this proof belongs to.
  // Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc.
  uint64 proof_id = 1 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
  ];

  // The slot of the beacon block this proof validates.
  uint64 slot = 2 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
  ];

  // The block hash of the execution payload this proof validates.
  bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];

  // The root of the beacon block that contains the execution payload
  // this proof attests to.
  bytes block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];

  // The actual proof data.
  bytes proof_data = 5 [ (ethereum.eth.ext.ssz_max) = "1048576" ];
}

message ExecutionProofsByRootRequest {
  // The block root we need proofs for.
  bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];

  // The number of proofs needed.
  uint64 count_needed = 2;

  // We already have these proof IDs, so don't send them again.
  repeated uint64 already_have = 3 [
    (ethereum.eth.ext.ssz_max) = "8",
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
  ];
}
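Because of the `cast_type` options above, the generated Go structs carry the `primitives` types directly. A brief construction sketch (illustrative only, not part of the diff), using the generated getters shown in execution_proof.pb.go:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	// Ask peers for up to two more proofs for a block root,
	// telling them which proof id we already hold.
	req := &eth.ExecutionProofsByRootRequest{
		BlockRoot:   make([]byte, 32),
		CountNeeded: 2,
		AlreadyHave: []primitives.ExecutionProofId{0},
	}
	fmt.Println(req.GetCountNeeded(), req.GetAlreadyHave())
}
```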
proto/prysm/v1alpha1/execution_proof.ssz.go (new file, 300 lines)
@@ -0,0 +1,300 @@
// NOTE: This file is auto-generated by sszgen, but modified manually
// to handle the alias type ExecutionProofId which is based on uint8.
package eth

import (
    github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ssz "github.com/prysmaticlabs/fastssz"
)

// MarshalSSZ ssz marshals the ExecutionProof object
func (e *ExecutionProof) MarshalSSZ() ([]byte, error) {
    return ssz.MarshalSSZ(e)
}

// MarshalSSZTo ssz marshals the ExecutionProof object to a target array
func (e *ExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
    dst = buf
    offset := int(77)

    // Field (0) 'ProofId'
    dst = ssz.MarshalUint8(dst, uint8(e.ProofId))

    // Field (1) 'Slot'
    dst = ssz.MarshalUint64(dst, uint64(e.Slot))

    // Field (2) 'BlockHash'
    if size := len(e.BlockHash); size != 32 {
        err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
        return
    }
    dst = append(dst, e.BlockHash...)

    // Field (3) 'BlockRoot'
    if size := len(e.BlockRoot); size != 32 {
        err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
        return
    }
    dst = append(dst, e.BlockRoot...)

    // Offset (4) 'ProofData'
    dst = ssz.WriteOffset(dst, offset)
    offset += len(e.ProofData)

    // Field (4) 'ProofData'
    if size := len(e.ProofData); size > 1048576 {
        err = ssz.ErrBytesLengthFn("--.ProofData", size, 1048576)
        return
    }
    dst = append(dst, e.ProofData...)

    return
}

// UnmarshalSSZ ssz unmarshals the ExecutionProof object
func (e *ExecutionProof) UnmarshalSSZ(buf []byte) error {
    var err error
    size := uint64(len(buf))
    if size < 77 {
        return ssz.ErrSize
    }

    tail := buf
    var o4 uint64

    // Field (0) 'ProofId'
    e.ProofId = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[0:1]))

    // Field (1) 'Slot'
    e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[1:9]))

    // Field (2) 'BlockHash'
    if cap(e.BlockHash) == 0 {
        e.BlockHash = make([]byte, 0, len(buf[9:41]))
    }
    e.BlockHash = append(e.BlockHash, buf[9:41]...)

    // Field (3) 'BlockRoot'
    if cap(e.BlockRoot) == 0 {
        e.BlockRoot = make([]byte, 0, len(buf[41:73]))
    }
    e.BlockRoot = append(e.BlockRoot, buf[41:73]...)

    // Offset (4) 'ProofData'
    if o4 = ssz.ReadOffset(buf[73:77]); o4 > size {
        return ssz.ErrOffset
    }

    if o4 != 77 {
        return ssz.ErrInvalidVariableOffset
    }

    // Field (4) 'ProofData'
    {
        buf = tail[o4:]
        if len(buf) > 1048576 {
            return ssz.ErrBytesLength
        }
        if cap(e.ProofData) == 0 {
            e.ProofData = make([]byte, 0, len(buf))
        }
        e.ProofData = append(e.ProofData, buf...)
    }
    return err
}

// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProof object
func (e *ExecutionProof) SizeSSZ() (size int) {
    size = 77

    // Field (4) 'ProofData'
    size += len(e.ProofData)

    return
}

// HashTreeRoot ssz hashes the ExecutionProof object
func (e *ExecutionProof) HashTreeRoot() ([32]byte, error) {
    return ssz.HashWithDefaultHasher(e)
}

// HashTreeRootWith ssz hashes the ExecutionProof object with a hasher
func (e *ExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
    indx := hh.Index()

    // Field (0) 'ProofId'
    hh.PutUint8(uint8(e.ProofId))

    // Field (1) 'Slot'
    hh.PutUint64(uint64(e.Slot))

    // Field (2) 'BlockHash'
    if size := len(e.BlockHash); size != 32 {
        err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
        return
    }
    hh.PutBytes(e.BlockHash)

    // Field (3) 'BlockRoot'
    if size := len(e.BlockRoot); size != 32 {
        err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
        return
    }
    hh.PutBytes(e.BlockRoot)

    // Field (4) 'ProofData'
    {
        elemIndx := hh.Index()
        byteLen := uint64(len(e.ProofData))
        if byteLen > 1048576 {
            err = ssz.ErrIncorrectListSize
            return
        }
        hh.PutBytes(e.ProofData)
        hh.MerkleizeWithMixin(elemIndx, byteLen, (1048576+31)/32)
    }

    hh.Merkleize(indx)
    return
}

// MarshalSSZ ssz marshals the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) MarshalSSZ() ([]byte, error) {
    return ssz.MarshalSSZ(e)
}

// MarshalSSZTo ssz marshals the ExecutionProofsByRootRequest object to a target array
func (e *ExecutionProofsByRootRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
    dst = buf
    offset := int(44)

    // Field (0) 'BlockRoot'
    if size := len(e.BlockRoot); size != 32 {
        err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
        return
    }
    dst = append(dst, e.BlockRoot...)

    // Field (1) 'CountNeeded'
    dst = ssz.MarshalUint64(dst, e.CountNeeded)

    // Offset (2) 'AlreadyHave'
    dst = ssz.WriteOffset(dst, offset)
    offset += len(e.AlreadyHave) * 1

    // Field (2) 'AlreadyHave'
    if size := len(e.AlreadyHave); size > 8 {
        err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
        return
    }
    for ii := 0; ii < len(e.AlreadyHave); ii++ {
        dst = ssz.MarshalUint8(dst, uint8(e.AlreadyHave[ii]))
    }

    return
}

// UnmarshalSSZ ssz unmarshals the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) UnmarshalSSZ(buf []byte) error {
    var err error
    size := uint64(len(buf))
    if size < 44 {
        return ssz.ErrSize
    }

    tail := buf
    var o2 uint64

    // Field (0) 'BlockRoot'
    if cap(e.BlockRoot) == 0 {
        e.BlockRoot = make([]byte, 0, len(buf[0:32]))
    }
    e.BlockRoot = append(e.BlockRoot, buf[0:32]...)

    // Field (1) 'CountNeeded'
    e.CountNeeded = ssz.UnmarshallUint64(buf[32:40])

    // Offset (2) 'AlreadyHave'
    if o2 = ssz.ReadOffset(buf[40:44]); o2 > size {
        return ssz.ErrOffset
    }

    if o2 != 44 {
        return ssz.ErrInvalidVariableOffset
    }

    // Field (2) 'AlreadyHave'
    {
        buf = tail[o2:]
        num, err := ssz.DivideInt2(len(buf), 1, 8)
        if err != nil {
            return err
        }
        // `primitives.ExecutionProofId` is an alias of `uint8`,
        // but we need to handle the conversion manually here
        // to call `ssz.ExtendUint8`.
        alreadyHave := make([]uint8, len(e.AlreadyHave))
        for i, v := range e.AlreadyHave {
            alreadyHave[i] = uint8(v)
        }
        alreadyHave = ssz.ExtendUint8(alreadyHave, num)
        alreadyHave2 := make([]github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId, len(alreadyHave))
        for i, v := range alreadyHave {
            alreadyHave2[i] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(v)
        }
        e.AlreadyHave = alreadyHave2
        for ii := range num {
            e.AlreadyHave[ii] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[ii*1 : (ii+1)*1]))
        }
    }
    return err
}

// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) SizeSSZ() (size int) {
    size = 44

    // Field (2) 'AlreadyHave'
    size += len(e.AlreadyHave) * 1

    return
}

// HashTreeRoot ssz hashes the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) HashTreeRoot() ([32]byte, error) {
    return ssz.HashWithDefaultHasher(e)
}

// HashTreeRootWith ssz hashes the ExecutionProofsByRootRequest object with a hasher
func (e *ExecutionProofsByRootRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
    indx := hh.Index()

    // Field (0) 'BlockRoot'
    if size := len(e.BlockRoot); size != 32 {
        err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
        return
    }
    hh.PutBytes(e.BlockRoot)

    // Field (1) 'CountNeeded'
    hh.PutUint64(e.CountNeeded)

    // Field (2) 'AlreadyHave'
    {
        if size := len(e.AlreadyHave); size > 8 {
            err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
            return
        }
        subIndx := hh.Index()
        for _, i := range e.AlreadyHave {
            hh.AppendUint8(uint8(i))
        }
        hh.FillUpTo32()

        numItems := uint64(len(e.AlreadyHave))
        hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(8, numItems, 1))
    }

    hh.Merkleize(indx)
    return
}
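A quick encode/decode round-trip sketch for the hand-maintained codec above (illustrative only, not part of the diff):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	proof := &eth.ExecutionProof{
		ProofId:   1,
		Slot:      128,
		BlockHash: make([]byte, 32),
		BlockRoot: make([]byte, 32),
		ProofData: []byte{0x01, 0x02, 0x03},
	}

	// The fixed part is 77 bytes (1 + 8 + 32 + 32 + a 4-byte offset), plus the proof data.
	enc, err := proof.MarshalSSZ()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(enc) == 77+len(proof.ProofData)) // true

	decoded := &eth.ExecutionProof{}
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Slot == proof.Slot, bytes.Equal(decoded.ProofData, proof.ProofData)) // true true
}
```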
@@ -44,6 +44,13 @@ func WithProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, valRoo
    }
}

// WithProposer sets the proposer index for the generated block without signing.
func WithProposer(idx primitives.ValidatorIndex) DenebBlockGeneratorOption {
    return func(g *denebBlockGenerator) {
        g.proposer = idx
    }
}

func WithPayloadSetter(p *enginev1.ExecutionPayloadDeneb) DenebBlockGeneratorOption {
    return func(g *denebBlockGenerator) {
        g.payload = p