Compare commits

...

63 Commits

Author SHA1 Message Date
Manu NALEPA
66f63aee9c [WIP] simplify 2026-01-22 14:20:13 +01:00
Jun Song
698b6922f0 Use dummy for el_type when we want to launch zk attester node (#11) 2026-01-05 19:32:53 +09:00
Jun Song
ca228fca44 Merge branch 'develop' into poc/optional-proofs 2026-01-05 15:43:22 +09:00
Jun Song
4d6663b4de Implement exec proof service & pruning logics (#10)
* Initialize exec proof service

* Fix wrong condition for starting exec proof pool service
2025-12-26 18:35:08 +09:00
Jun Song
e713560a68 Add interop.yaml script with guide (#8) 2025-12-26 16:02:18 +09:00
Jun Song
4571e50609 Implement RPC for execution proofs & Fix broken unit tests (#9)
* Add ExecutionProofsByRootRequest struct with SSZ support

* Add skeleton for requesting execution proofs

* Check proof retention before sending the request

* Implement sendAndSaveExecutionProofs with skeleton SendExecutionProofsByRootRequest

* Nuke deprecated request alias

* Implement handler and sender without peer selection logic

* Add peer selection logic with zkvm entry key

* Fix broken tests

* Add TestZkvmEnabledPeers

* Fix stream read code for execution proof & Add unit test for handler

* Add sender test
2025-12-26 16:02:08 +09:00
Jun Song
175738919e Check whether proof generation is needed (#7)
* Check proof retention

* Check whether we already have requested execution proof or not
2025-12-24 15:57:53 +09:00
Jun Song
f1cbdc9fa6 Verify execution proofs received from gossip (#6) 2025-12-24 15:31:40 +09:00
Jun Song
156383c9c8 Merge branch 'develop' into poc/optional-proofs 2025-12-24 14:52:53 +09:00
Developer Uche
5ede7c8fe0 Merge pull request #5 from syjn99/fix/proof-gen-devnet
Skip DA check when node is able to generate proofs & Add some kurtosis scripts
2025-12-18 21:15:11 +01:00
Jun Song
3324c7b655 Add proof_verify devnet script 2025-12-16 01:01:48 +09:00
Jun Song
d477bcfa20 Add useful logs 2025-12-16 01:01:32 +09:00
Jun Song
38183471da Add default kurtosis script for proof gen devnet 2025-12-16 00:44:34 +09:00
Jun Song
3c3e2b42e9 Skip waiting for proof if it's proof generator node 2025-12-16 00:44:21 +09:00
Developer Uche
d496f7bfab Merge pull request #4 from syjn99/fix/zkvm-enr
Set zkVM ENR entry correctly if mode is enabled
2025-12-15 11:10:15 +01:00
Developer Uche
55e2663f82 Merge pull request #3 from syjn99/fix/optional-p2p
Add missing pieces regarding Gossip
2025-12-15 11:09:42 +01:00
Jun Song
5f0afd09c6 Add DA failure case 2025-12-10 17:37:04 +09:00
Jun Song
95fff68b11 Add waiting case for DA 2025-12-10 17:33:28 +09:00
Jun Song
d0bc0fcda8 Add happy case for execution proofs DA 2025-12-10 17:26:57 +09:00
Jun Song
8b2acd5f47 Add validate_execution_proof_test.go 2025-12-10 17:04:05 +09:00
Jun Song
fb071ebe20 Add execution proofs pool tests 2025-12-10 16:25:22 +09:00
Jun Song
a174d0cd53 Set zkVM entry correctly if mode is enabled 2025-12-10 16:16:16 +09:00
Jun Song
06655dcd1f Resolve build issues 2025-12-10 13:07:58 +09:00
Jun Song
c1dcf97c0c Fix mock exec proof pool 2025-12-10 12:55:32 +09:00
Jun Song
f596223096 Add blocking logic for DA in EIP-8025 2025-12-10 12:53:06 +09:00
Jun Song
a184afdfb4 Implement execution proof pool 2025-12-10 12:10:20 +09:00
Jun Song
056843bcae Register execution proof pool for sync/blockchain services 2025-12-10 12:00:32 +09:00
Jun Song
a587a9dd6e Add skeletons for pool and verifier logics 2025-12-10 11:53:08 +09:00
Jun Song
dde9dc3dd9 Mark proof as seen 2025-12-10 11:48:29 +09:00
Jun Song
960d666801 Add proof size validation 2025-12-10 11:44:22 +09:00
Jun Song
1468c20c54 Add basic validation logics for execution proof gossip 2025-12-09 23:19:56 +09:00
Jun Song
68d8988121 Use alias of BeaconBlockByRootsReq for ExecutionProofsByRoot 2025-12-09 22:37:37 +09:00
Jun Song
9ca5bf0119 Build issue with Bazel 2025-12-09 22:36:48 +09:00
Jun Song
bf8f494792 Use different gossip param weight 2025-12-09 12:57:21 +09:00
Jun Song
cab25267b5 Fix gossip subscriber match with BLSToExecutionChange 2025-12-09 12:49:33 +09:00
Jun Song
b9c23dae89 Run gazelle 2025-12-09 12:44:56 +09:00
developeruche
7944731ccf done with p2p sub-task save; SendExecutionProofByRootRequest, executionProofsByRootRPCHandler 2025-12-07 23:41:54 +01:00
developeruche
4d2a61a2e0 Merge branch 'poc/optional-proofs' of https://github.com/developeruche/prysm into poc/optional-proofs 2025-12-07 19:02:36 +01:00
developeruche
8708c198c9 gossip functionality ready save validation logic 2025-12-07 19:01:50 +01:00
Developer Uche
2857eeae6e Merge pull request #1 from syjn99/feat/dummy-proof-gen-service 2025-12-06 01:27:44 +01:00
Jun Song
4912c29d06 Generate proofs that are registered without checking 2025-12-04 18:22:09 +09:00
Jun Song
d520158510 Register dummy registry 2025-12-04 18:15:13 +09:00
Jun Song
c13d61a959 Add basic flow for proof generation 2025-12-04 18:09:08 +09:00
Jun Song
f5c61ebaea Print as hex string 2025-12-04 17:32:49 +09:00
Jun Song
ae3d465615 Add missing flag activation 2025-12-04 17:31:54 +09:00
Jun Song
f23210853d Subscribe to the block import event in proofgen service 2025-12-04 17:10:34 +09:00
developeruche
6dc49b41f2 change execution proof topic from subnet to global 2025-12-04 09:04:36 +01:00
developeruche
e56550af48 added execution proof to gossip topics 2025-12-02 06:23:44 +01:00
developeruche
20f617ecc9 enr zkvm config 2025-12-01 16:41:20 +01:00
developeruche
adb1de9caa moved proof_cache to beacon cache 2025-12-01 13:03:53 +01:00
Jun Song
2d9e6ad2c8 Add skeleton proof generation service 2025-11-29 21:35:56 +09:00
Jun Song
e8eb022145 Parse flag & Register in the global context 2025-11-29 21:24:37 +09:00
Jun Song
38be9400f1 Rename with underscores 2025-11-29 20:27:36 +09:00
Jun Song
b01e760e0a Make compatible with codebase 2025-11-29 20:25:52 +09:00
Jun Song
da4a8f1dd3 Add ExecutionProofId & ExecutionProof type 2025-11-29 19:36:42 +09:00
Jun Song
0dca170953 Merge branch 'develop' into poc/optional-proofs-2 2025-11-29 19:36:37 +09:00
developeruche
cd549abbfa added cli flags 2025-11-10 07:15:14 +01:00
developeruche
28a661518e lastest consensus-type, zkvm-execution-layer 2025-11-08 17:07:43 +01:00
developeruche
4ab5888c4c add registry proof gen/verification 2025-11-07 21:20:57 +01:00
developeruche
0d818bc687 add proof gen n verify interfaces 2025-11-07 15:47:35 +01:00
developeruche
0e90a0f2d8 add proof cache 2025-11-07 10:40:00 +01:00
developeruche
2de069d543 add config 2025-11-07 10:38:09 +01:00
developeruche
50e88045bb add consensus types 2025-11-05 10:13:19 +01:00
61 changed files with 3460 additions and 49 deletions

View File

@@ -48,6 +48,7 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -65,6 +66,7 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -146,6 +148,8 @@ go_test(
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -13,6 +14,7 @@ import (
lightclient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -136,6 +138,14 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
}
// WithExecProofsPool to keep track of execution proofs.
func WithExecProofsPool(p execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.ExecProofsPool = p
return nil
}
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Accessor) Option {
return func(s *Service) error {
@@ -266,3 +276,10 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
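// WithOperationNotifier to subscribe to operation feed events such as received execution proofs.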
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}

View File

@@ -7,6 +7,8 @@ import (
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -15,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -113,6 +116,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
@@ -661,10 +665,17 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}
root := roBlock.Root()
blockVersion := block.Version()
root, blockVersion := roBlock.Root(), roBlock.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
if err := s.areExecutionProofsAvailable(ctx, root); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}
return nil
}
if blockVersion >= version.Deneb {
@@ -674,6 +685,67 @@ func (s *Service) isDataAvailable(
return nil
}
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes with neither an execution layer nor proof-generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if proof generation is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
return nil
}
requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"requiredProofCount": requiredProofCount,
})
// Subscribe to execution proof received events.
eventsChan := make(chan *feed.Event, 1)
subscription := s.cfg.OperationNotifier.OperationFeed().Subscribe(eventsChan)
defer subscription.Unsubscribe()
// Return early if we already have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}
// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case event := <-eventsChan:
if event.Type != operation.ExecutionProofReceived {
continue
}
proofWrapper, ok := event.Data.(*operation.ExecutionProofReceivedData)
if !ok {
log.Error("Could not cast event data to ExecutionProofReceivedData")
continue
}
proof := proofWrapper.ExecutionProof
// Skip if the proof is for a different block.
if bytesutil.ToBytes32(proof.BlockRoot) != blockRoot {
continue
}
// Return if we have enough proofs.
if actualProofCount := uint64(s.cfg.ExecProofsPool.Count(blockRoot)); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(

View File

@@ -13,6 +13,8 @@ import (
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -3001,6 +3003,113 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NotNil(t, err)
})
t.Run("EIP-8025 (Optional Proofs) - already enough proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)
// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
testParams := testIsAvailableParams{
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// Insert MinProofsRequired execution proofs into the pool
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
}
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
t.Run("EIP-8025 (Optional Proofs) - data columns success then wait for execution proofs", func(t *testing.T) {
// Enable zkVM feature
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
// Set MinProofsRequired for testing
cfg := params.BeaconConfig().Copy()
cfg.MinProofsRequired = 3
params.OverrideBeaconConfig(cfg)
// Setup with sufficient data columns
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
for i := range minimumColumnsCountToReconstruct {
indices = append(indices, i)
}
testParams := testIsAvailableParams{
options: []Option{
WithOperationNotifier(&mock.MockOperationNotifier{}),
},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
// Goroutine to send execution proofs after data columns are available
go func() {
// Wait a bit to simulate async proof arrival
time.Sleep(50 * time.Millisecond)
// Send ExecutionProofReceived events
opfeed := service.cfg.OperationNotifier.OperationFeed()
for i := range cfg.MinProofsRequired {
proof := &ethpb.ExecutionProof{
BlockRoot: root[:],
Slot: signed.Block().Slot(),
ProofId: primitives.ExecutionProofId(i),
ProofData: []byte{byte(i)},
}
service.cfg.ExecProofsPool.Insert(proof)
opfeed.Send(&feed.Event{
Type: operation.ExecutionProofReceived,
Data: &operation.ExecutionProofReceivedData{
ExecutionProof: proof,
},
})
}
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
}
// Test_postBlockProcess_EventSending tests that block processed events are only sent

View File

@@ -66,52 +66,54 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
time.Sleep(1 * time.Second)
}
}()
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
continue
}
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -24,6 +25,7 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
@@ -85,9 +87,11 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
ExecProofsPool execproofs.PoolManager
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
OperationNotifier operation.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
@@ -211,7 +215,9 @@ func (s *Service) Start() {
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
s.spawnProcessAttestationsRoutine()
go s.spawnProcessAttestationsRoutine()
go s.spawnFinalizedProofsPruningRoutine()
go s.runLateBlockTasks()
}
@@ -567,3 +573,46 @@ func fuluForkSlot() (primitives.Slot, error) {
return forkFuluSlot, nil
}
// spawnFinalizedProofsPruningRoutine prunes the execution proof pool at the start of every epoch.
// It removes proofs older than the finalized checkpoint to prevent unbounded
// memory growth.
// TODO: Manage cases where the network is not finalizing for a long time (avoid OOMs...)
func (s *Service) spawnFinalizedProofsPruningRoutine() {
ticker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
defer ticker.Done()
for {
select {
case slot := <-ticker.C():
// Only prune at the start of each epoch
if !slots.IsEpochStart(slot) {
continue
}
finalizedCheckpoint := s.FinalizedCheckpt()
if finalizedCheckpoint == nil {
log.Error("Finalized checkpoint is nil, cannot prune execution proofs")
continue
}
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
log.WithError(err).Error("Could not get finalized slot")
continue
}
// Prune proofs older than finalized slot
if count := s.cfg.ExecProofsPool.PruneUpTo(finalizedSlot); count > 0 {
log.WithFields(logrus.Fields{
"prunedCount": count,
"finalizedSlot": finalizedSlot,
}).Debug("Pruned finalized execution proofs")
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}

View File

@@ -46,6 +46,9 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionProofReceived is sent after an execution proof object has been received from gossip or RPC.
ExecutionProofReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,6 +80,11 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *ethpb.ExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

View File

@@ -37,6 +37,7 @@ go_library(
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",

View File

@@ -40,6 +40,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/node/registration"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -102,6 +103,7 @@ type BeaconNode struct {
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
execProofsPool execproofs.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
@@ -156,6 +158,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
slashingsPool: slashings.NewPool(),
syncCommitteePool: synccommittee.NewPool(),
blsToExecPool: blstoexec.NewPool(),
execProofsPool: execproofs.NewPool(),
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
payloadIDCache: cache.NewPayloadIDCache(),
slasherBlockHeadersFeed: new(event.Feed),
@@ -737,6 +740,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithBLSToExecPool(b.blsToExecPool),
blockchain.WithExecProofsPool(b.execProofsPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithAttestationService(attService),
@@ -752,6 +756,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
blockchain.WithOperationNotifier(b),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -827,6 +832,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithSlashingPool(b.slashingsPool),
regularsync.WithSyncCommsPool(b.syncCommitteePool),
regularsync.WithBlsToExecPool(b.blsToExecPool),
regularsync.WithExecProofPool(b.execProofsPool),
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),

View File

@@ -0,0 +1,16 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pool.go"],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
],
)

View File

@@ -0,0 +1,174 @@
package execproofs
import (
"fmt"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// ProofKey uniquely identifies an execution proof by block root and proof type.
type ProofKey struct {
Root [fieldparams.RootLength]byte
ProofId primitives.ExecutionProofId
}
// String returns a string representation for logging.
func (k ProofKey) String() string {
return fmt.Sprintf("root=%#x,proofId=%d", k.Root, k.ProofId)
}
var (
execProofInPoolTotal = promauto.NewGauge(prometheus.GaugeOpts{
Name: "exec_proof_pool_total",
Help: "The number of execution proofs in the operation pool.",
})
)
var _ PoolManager = (*ExecProofPool)(nil)
// PoolManager maintains execution proofs received via gossip.
// These proofs are used for data availability checks when importing blocks.
// Lightweight verifier nodes need a minimum number of proofs from different zkVM types
// to verify block execution correctness.
type PoolManager interface {
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
Insert(executionProof *ethpb.ExecutionProof)
// Get returns a copy of all proofs for a specific block root
Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof
// Ids returns the list of (unique) proof types available for a specific block root
Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId
// Count counts the number of proofs for a specific block root
Count(blockRoot [fieldparams.RootLength]byte) uint64
// Exists checks if a proof exists for the given block root and proof ID
Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool
// PruneUpTo removes proofs older than the target slot
PruneUpTo(targetSlot primitives.Slot) int
}
// ExecProofPool is a concrete implementation of the PoolManager interface.
type ExecProofPool struct {
lock sync.RWMutex
m map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof
}
// NewPool returns an initialized pool.
func NewPool() *ExecProofPool {
return &ExecProofPool{
m: make(map[[fieldparams.RootLength]byte]map[primitives.ExecutionProofId]*ethpb.ExecutionProof),
}
}
// Insert inserts a proof into the pool.
// If a proof with the same block root and proof ID already exists, it is not added again.
func (p *ExecProofPool) Insert(proof *ethpb.ExecutionProof) {
p.lock.Lock()
defer p.lock.Unlock()
blockRoot := bytesutil.ToBytes32(proof.BlockRoot)
// Create the inner map if it doesn't exist
if p.m[blockRoot] == nil {
p.m[blockRoot] = make(map[primitives.ExecutionProofId]*ethpb.ExecutionProof)
}
// Check if proof already exists
if _, exists := p.m[blockRoot][proof.ProofId]; exists {
return
}
// Insert new proof
p.m[blockRoot][proof.ProofId] = proof
execProofInPoolTotal.Inc()
}
// Get returns a copy of all proofs for a specific block root
func (p *ExecProofPool) Get(blockRoot [fieldparams.RootLength]byte) []*ethpb.ExecutionProof {
p.lock.RLock()
defer p.lock.RUnlock()
proofsByType, exists := p.m[blockRoot]
if !exists {
return nil
}
result := make([]*ethpb.ExecutionProof, 0, len(proofsByType))
for _, proof := range proofsByType {
result = append(result, proof.Copy())
}
return result
}
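// Ids returns the list of unique proof IDs available for a specific block root.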
func (p *ExecProofPool) Ids(blockRoot [fieldparams.RootLength]byte) []primitives.ExecutionProofId {
p.lock.RLock()
defer p.lock.RUnlock()
proofById, exists := p.m[blockRoot]
if !exists {
return nil
}
ids := make([]primitives.ExecutionProofId, 0, len(proofById))
for id := range proofById {
ids = append(ids, id)
}
return ids
}
// Count counts the number of proofs for a specific block root
func (p *ExecProofPool) Count(blockRoot [fieldparams.RootLength]byte) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
return uint64(len(p.m[blockRoot]))
}
// Exists checks if a proof exists for the given block root and proof ID
func (p *ExecProofPool) Exists(blockRoot [fieldparams.RootLength]byte, proofId primitives.ExecutionProofId) bool {
p.lock.RLock()
defer p.lock.RUnlock()
proofsByType, exists := p.m[blockRoot]
if !exists {
return false
}
_, exists = proofsByType[proofId]
return exists
}
// PruneUpTo removes proofs older than the given slot
func (p *ExecProofPool) PruneUpTo(targetSlot primitives.Slot) int {
p.lock.Lock()
defer p.lock.Unlock()
pruned := 0
for blockRoot, proofsByType := range p.m {
for proofId, proof := range proofsByType {
if proof.Slot < targetSlot {
delete(proofsByType, proofId)
execProofInPoolTotal.Dec()
pruned++
}
}
// Clean up empty inner maps
if len(proofsByType) == 0 {
delete(p.m, blockRoot)
}
}
return pruned
}
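For orientation, here is a minimal usage sketch of the pool API introduced above. It is illustrative only and not part of the changeset: the package paths and field names come from the diff, while the values and the main function are made up.

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	pool := execproofs.NewPool()

	// Insert a dummy proof; duplicate (block root, proof ID) pairs are ignored.
	root := bytesutil.ToBytes32([]byte("example block root"))
	pool.Insert(&ethpb.ExecutionProof{
		BlockRoot: root[:],
		Slot:      10,
		ProofId:   1,
		ProofData: []byte{0xFF},
	})

	// Query the pool the way the availability check does before importing a block.
	fmt.Println(pool.Count(root), pool.Exists(root, 1), pool.Ids(root))

	// After finalization, drop proofs older than the finalized slot.
	pruned := pool.PruneUpTo(11) // removes proofs with Slot < 11, i.e. the one above
	fmt.Println("pruned:", pruned)
}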

View File

@@ -165,6 +165,7 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -589,6 +589,11 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

View File

@@ -25,6 +25,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -243,12 +244,19 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -273,6 +281,15 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}
// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -348,6 +365,14 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)
// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}
})
}
}

View File

@@ -52,6 +52,9 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -145,6 +148,8 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -510,6 +515,28 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: executionProofWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,

View File

@@ -25,6 +25,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -602,6 +602,33 @@ func (p *Status) All() []peer.ID {
return pids
}
// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState != Connected {
continue
}
if peerData.Enr == nil {
continue
}
var enabled bool
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
if err := peerData.Enr.Load(entry); err != nil {
continue
}
if enabled {
peers = append(peers, pid)
}
}
return peers
}
// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
p.store.Lock()

View File

@@ -1341,3 +1341,75 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
p.SetConnectionState(id, state)
return id
}
func TestZkvmEnabledPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 1,
},
},
})
// Create peer 1: Connected, zkVM enabled
pid1 := addPeer(t, p, peers.Connected)
record1 := new(enr.Record)
zkvmEnabled := true
record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record1, pid1, nil, network.DirOutbound)
p.SetConnectionState(pid1, peers.Connected)
// Create peer 2: Connected, zkVM disabled
pid2 := addPeer(t, p, peers.Connected)
record2 := new(enr.Record)
zkvmDisabled := false
record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
p.Add(record2, pid2, nil, network.DirOutbound)
p.SetConnectionState(pid2, peers.Connected)
// Create peer 3: Connected, zkVM enabled
pid3 := addPeer(t, p, peers.Connected)
record3 := new(enr.Record)
record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record3, pid3, nil, network.DirOutbound)
p.SetConnectionState(pid3, peers.Connected)
// Create peer 4: Disconnected, zkVM enabled (should not be included)
pid4 := addPeer(t, p, peers.Disconnected)
record4 := new(enr.Record)
record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
p.Add(record4, pid4, nil, network.DirOutbound)
p.SetConnectionState(pid4, peers.Disconnected)
// Create peer 5: Connected, no ENR (should not be included)
pid5 := addPeer(t, p, peers.Connected)
p.Add(nil, pid5, nil, network.DirOutbound)
p.SetConnectionState(pid5, peers.Connected)
// Create peer 6: Connected, no zkVM key in ENR (should not be included)
pid6 := addPeer(t, p, peers.Connected)
record6 := new(enr.Record)
record6.Set(enr.WithEntry("other_key", "other_value"))
p.Add(record6, pid6, nil, network.DirOutbound)
p.SetConnectionState(pid6, peers.Connected)
// Get zkVM enabled peers
zkvmPeers := p.ZkvmEnabledPeers()
// Should return only pid1 and pid3 (connected peers with zkVM enabled)
assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
// Verify the returned peers are correct
zkvmPeerMap := make(map[peer.ID]bool)
for _, pid := range zkvmPeers {
zkvmPeerMap[pid] = true
}
assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

View File

@@ -67,6 +67,9 @@ const (
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
ExecutionProofsByRootName = "/execution_proofs_by_root"
)
const (
@@ -106,6 +109,9 @@ const (
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -170,6 +176,9 @@ var (
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
// ExecutionProofsByRoot v1 Message
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}
// Maps all registered protocol prefixes.
@@ -193,6 +202,7 @@ var (
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
ExecutionProofsByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.

View File

@@ -36,6 +36,7 @@ var (
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
)
// The value used with the subnet, in order

View File

@@ -46,6 +46,8 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipExecutionProofMessage is the name for the execution proof message type.
GossipExecutionProofMessage = "execution_proof"
// Topic Formats
//
@@ -75,6 +77,8 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (the PoC uses a single global topic)
)
// topic is a struct representing a single gossipsub topic.
@@ -158,6 +162,7 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(fulu, future, empty, GossipExecutionProofMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

View File

@@ -169,6 +169,11 @@ func TestGetSpec(t *testing.T) {
config.BlobsidecarSubnetCountElectra = 102
config.SyncMessageDueBPS = 103
// EIP-8025
config.MaxProofDataBytes = 200
config.MinEpochsForExecutionProofRequests = 201
config.MinProofsRequired = 202
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
config.DomainBeaconProposer = dbp
@@ -205,7 +210,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 175, len(data))
assert.Equal(t, 178, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -577,6 +582,12 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "103", v)
case "MAX_PROOF_DATA_BYTES":
assert.Equal(t, "200", v)
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
assert.Equal(t, "201", v)
case "MIN_PROOFS_REQUIRED":
assert.Equal(t, "202", v)
case "BLOB_SCHEDULE":
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)

View File

@@ -14,6 +14,7 @@ go_library(
"decode_pubsub.go",
"doc.go",
"error.go",
"exec_proofs.go",
"fork_watcher.go",
"fuzz_exports.go", # keep
"log.go",
@@ -31,6 +32,7 @@ go_library(
"rpc_chunked_response.go",
"rpc_data_column_sidecars_by_range.go",
"rpc_data_column_sidecars_by_root.go",
"rpc_execution_proofs_by_root_topic.go",
"rpc_goodbye.go",
"rpc_light_client.go",
"rpc_metadata.go",
@@ -46,6 +48,7 @@ go_library(
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_execution_proofs.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
@@ -57,6 +60,7 @@ go_library(
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_execution_proof.go",
"validate_light_client.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
@@ -93,6 +97,7 @@ go_library(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
@@ -187,6 +192,7 @@ go_test(
"rpc_blob_sidecars_by_root_test.go",
"rpc_data_column_sidecars_by_range_test.go",
"rpc_data_column_sidecars_by_root_test.go",
"rpc_execution_proofs_by_root_topic_test.go",
"rpc_goodbye_test.go",
"rpc_handler_test.go",
"rpc_light_client_test.go",
@@ -211,6 +217,7 @@ go_test(
"validate_blob_test.go",
"validate_bls_to_execution_change_test.go",
"validate_data_column_test.go",
"validate_execution_proof_test.go",
"validate_light_client_test.go",
"validate_proposer_slashing_test.go",
"validate_sync_committee_message_test.go",
@@ -244,6 +251,7 @@ go_test(
"//beacon-chain/light-client:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/execproofs:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/p2p:go_default_library",

View File

@@ -0,0 +1,65 @@
package sync
import (
"fmt"
"time"
"errors"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// generateExecProof returns a dummy execution proof after the specified delay.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
// Simulate proof generation work
time.Sleep(delay)
// Create a dummy proof with some deterministic data
block := roBlock.Block()
if block == nil {
return nil, errors.New("nil block")
}
body := block.Body()
if body == nil {
return nil, errors.New("nil block body")
}
executionData, err := body.Execution()
if err != nil {
return nil, fmt.Errorf("execution: %w", err)
}
if executionData == nil {
return nil, errors.New("nil execution data")
}
hash, err := executionData.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("hash tree root: %w", err)
}
proofData := []byte{
0xFF, // Magic byte for dummy proof
byte(proofID),
// Include some payload hash bytes
hash[0],
hash[1],
hash[2],
hash[3],
}
blockRoot := roBlock.Root()
proof := &ethpb.ExecutionProof{
ProofId: proofID,
Slot: block.Slot(),
BlockHash: hash[:],
BlockRoot: blockRoot[:],
ProofData: proofData,
}
return proof, nil
}

View File

@@ -12,6 +12,7 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -88,6 +89,13 @@ func WithBlsToExecPool(blsToExecPool blstoexec.PoolManager) Option {
}
}
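// WithExecProofPool to keep track of execution proofs.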
func WithExecProofPool(execProofPool execproofs.PoolManager) Option {
return func(s *Service) error {
s.cfg.execProofPool = execProofPool
return nil
}
}
func WithChainService(chain blockchainService) Option {
return func(s *Service) error {
s.cfg.chain = chain

View File

@@ -259,6 +259,10 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
return errors.Wrap(err, "request and save missing data column sidecars")
}
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return nil
}

View File

@@ -100,6 +100,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
// DataColumnSidecarsByRangeV1
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */)
// ExecutionProofsByRootV1
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

View File

@@ -17,7 +17,7 @@ import (
func TestNewRateLimiter(t *testing.T) {
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
}
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

View File

@@ -51,6 +51,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
}, nil
}

View File

@@ -11,11 +11,13 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -87,9 +89,77 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
return errors.Wrap(err, "request and save missing data columns")
}
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return err
}
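// requestAndSaveMissingExecutionProofs requests any execution proofs still missing for the given blocks
// and saves them into the execution proof pool.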
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
if len(blks) == 0 {
return nil
}
// TODO: Parallelize requests for multiple blocks.
for _, blk := range blks {
if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
return err
}
}
return nil
}
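// sendAndSaveExecutionProofs requests missing execution proofs for the block from a zkVM-enabled peer
// and inserts them into the execution proof pool. It is a no-op if the zkVM feature is disabled, the block
// is outside the proof retention period, or enough proofs are already pooled.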
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
if !features.Get().EnableZkvm {
return nil
}
// Check proof retention period.
blockEpoch := slots.ToEpoch(block.Block().Slot())
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
return nil
}
// Check how many proofs are still needed using the execution proof pool.
storedIds := s.cfg.execProofPool.Ids(block.Root())
count := uint64(len(storedIds))
if count >= params.BeaconConfig().MinProofsRequired {
return nil
}
// Construct request
blockRoot := block.Root()
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: params.BeaconConfig().MinProofsRequired - count,
AlreadyHave: storedIds,
}
// Call SendExecutionProofsByRootRequest
zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
if len(zkvmEnabledPeers) == 0 {
return fmt.Errorf("no zkVM enabled peers available to request execution proofs")
}
// TODO: For simplicity, just pick the first peer for now.
// In the future, we can implement better peer selection logic.
pid := zkvmEnabledPeers[0]
proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
if err != nil {
return fmt.Errorf("send execution proofs by root request: %w", err)
}
// Insert the fetched proofs into the execution proof pool.
// TODO: Implement multiple proof insertion in ExecProofPool to avoid multiple locks.
for _, proof := range proofs {
s.cfg.execProofPool.Insert(proof)
}
return nil
}
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

View File

@@ -182,3 +182,21 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return nil
}
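// WriteExecutionProofChunk writes a single execution proof chunk to the stream: a success response code,
// the fork-digest context bytes, and the encoded proof.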
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
// Success response code.
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return errors.Wrap(err, "stream write")
}
ctxBytes := params.ForkDigest(slots.ToEpoch(proof.Slot))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return errors.Wrap(err, "write context to stream")
}
// Execution proof.
if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
return errors.Wrap(err, "encode with max length")
}
return nil
}

View File

@@ -0,0 +1,219 @@
package sync
import (
"context"
"errors"
"fmt"
"io"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns fetched execution proofs, if any.
func SendExecutionProofsByRootRequest(
ctx context.Context,
clock blockchain.TemporalOracle,
p2pProvider p2p.P2P,
pid peer.ID,
req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
// Validate request
if req.CountNeeded == 0 {
return nil, errors.New("count_needed must be greater than 0")
}
topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"topic": topic,
"block_root": bytesutil.ToBytes32(req.BlockRoot),
"count": req.CountNeeded,
"already": len(req.AlreadyHave),
}).Debug("Sending execution proofs by root request")
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)
// Read execution proofs from stream
proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
for _, id := range req.AlreadyHave {
alreadyHaveSet[id] = struct{}{}
}
for i := uint64(0); i < req.CountNeeded; i++ {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
return nil, err
}
// Validate proof
if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
return nil, err
}
proofs = append(proofs, proof)
}
return proofs, nil
}
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
func ReadChunkedExecutionProof(
stream libp2pcore.Stream,
encoding p2p.EncodingProvider,
isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
// Read status code for each chunk (like data columns, not like blocks)
code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
if err != nil {
return nil, err
}
if code != 0 {
return nil, errors.New(errMsg)
}
// Read context bytes (fork digest)
_, err = readContextFromStream(stream)
if err != nil {
return nil, fmt.Errorf("read context from stream: %w", err)
}
// Decode the proof
proof := &ethpb.ExecutionProof{}
if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
return nil, err
}
return proof, nil
}
// validateExecutionProof validates a received execution proof against the request.
func validateExecutionProof(
proof *ethpb.ExecutionProof,
req *ethpb.ExecutionProofsByRootRequest,
alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
// Check block root matches
proofRoot := bytesutil.ToBytes32(proof.BlockRoot)
reqRoot := bytesutil.ToBytes32(req.BlockRoot)
if proofRoot != reqRoot {
return fmt.Errorf("proof block root %#x does not match requested root %#x",
proofRoot, reqRoot)
}
// Check we didn't already have this proof
if _, ok := alreadyHaveSet[proof.ProofId]; ok {
return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
}
// Check proof ID is valid (within max range)
if !proof.ProofId.IsValid() {
return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
}
return nil
}
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
defer span.End()
_, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
if !ok {
return errors.New("message is not type ExecutionProofsByRootRequest")
}
remotePeer := stream.Conn().RemotePeer()
SetRPCStreamDeadlines(stream)
// Validate request
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
return err
}
// Penalize peers that send invalid requests.
if err := validateExecutionProofsByRootRequest(req); err != nil {
s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return fmt.Errorf("validate execution proofs by root request: %w", err)
}
blockRoot := bytesutil.ToBytes32(req.BlockRoot)
log := log.WithFields(logrus.Fields{
"blockroot": fmt.Sprintf("%#x", blockRoot),
"neededCount": req.CountNeeded,
"alreadyHave": req.AlreadyHave,
"peer": remotePeer.String(),
})
s.rateLimiter.add(stream, 1)
defer closeStream(stream, log)
// Get proofs from execution proof pool
storedProofs := s.cfg.execProofPool.Get(blockRoot)
// Build a lookup of proofs the requester already has so they can be skipped.
alreadyHave := make(map[primitives.ExecutionProofId]bool)
for _, id := range req.AlreadyHave {
alreadyHave[id] = true
}
// Send proofs
sentCount := uint64(0)
for _, proof := range storedProofs {
if sentCount >= req.CountNeeded {
break
}
// Skip proofs the requester already has
if alreadyHave[proof.ProofId] {
continue
}
// Write proof to stream
SetStreamWriteDeadline(stream, defaultWriteDuration)
if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
log.WithError(err).Debug("Could not send execution proof")
s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
return err
}
sentCount++
}
log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")
return nil
}
func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
if req.CountNeeded == 0 {
return errors.New("count_needed must be greater than 0")
}
return nil
}

View File

@@ -0,0 +1,727 @@
package sync
import (
"io"
"sync"
"testing"
"time"
chainMock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/pkg/errors"
)
func TestExecutionProofsByRootRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.executionProofsByRootRPCHandler(t.Context(), nil, nil)
require.ErrorContains(t, "message is not type ExecutionProofsByRootRequest", err)
})
t.Run("invalid request - count_needed is 0", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
service := &Service{cfg: &config{p2p: localP2P}, rateLimiter: newRateLimiter(localP2P)}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
code, errMsg, err := readStatusCodeNoDeadline(stream, localP2P.Encoding())
require.NoError(t, err)
require.Equal(t, responseCodeInvalidRequest, code)
require.Equal(t, "count_needed must be greater than 0", errMsg)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 0, // Invalid: must be > 0
AlreadyHave: []primitives.ExecutionProofId{},
}
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NotNil(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("zkVM disabled - returns empty", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: false, // Disabled
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
execProofPool := execproofs.NewPool()
service := &Service{
cfg: &config{
p2p: localP2P,
execProofPool: execProofPool,
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
// Should receive no proofs (stream should end)
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
require.ErrorIs(t, err, io.EOF)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("no proofs available", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
execProofPool := execproofs.NewPool()
service := &Service{
cfg: &config{
p2p: localP2P,
execProofPool: execProofPool,
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
// Should receive no proofs (stream should end)
_, err := ReadChunkedExecutionProof(stream, localP2P, true)
require.ErrorIs(t, err, io.EOF)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("nominal - returns requested proofs", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with some proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
// Add 3 proofs for the same block
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
proof3 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(3),
ProofData: []byte("proof3"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
execProofPool.Insert(proof3)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 2)
for i := range 2 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
assert.Equal(t, 2, len(proofs))
// Should receive proof1 and proof2 (first 2 in pool)
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("filters already_have proofs", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with some proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
// Add 4 proofs for the same block
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
proof3 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(3),
ProofData: []byte("proof3"),
}
proof4 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(4),
ProofData: []byte("proof4"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
execProofPool.Insert(proof3)
execProofPool.Insert(proof4)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 2)
for i := range 3 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
// Should skip proof1 and proof2 (already_have), and return proof3 and proof4
assert.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(3), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(4), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{1, 2}, // Already have proof1 and proof2
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
t.Run("partial send - less proofs than requested", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
defer resetCfg()
localP2P := p2ptest.NewTestP2P(t)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
// Create execution proof pool with only 2 proofs
execProofPool := execproofs.NewPool()
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
execProofPool.Insert(proof1)
execProofPool.Insert(proof2)
beaconDB := testDB.SetupDB(t)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
clock: clock,
execProofPool: execProofPool,
chain: &chainMock.ChainService{},
},
rateLimiter: newRateLimiter(localP2P),
}
remoteP2P := p2ptest.NewTestP2P(t)
var wg sync.WaitGroup
wg.Add(1)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer wg.Done()
proofs := make([]*ethpb.ExecutionProof, 0, 5)
for i := range 5 {
isFirstChunk := i == 0
proof, err := ReadChunkedExecutionProof(stream, remoteP2P, isFirstChunk)
if errors.Is(err, io.EOF) {
break
}
assert.NoError(t, err)
proofs = append(proofs, proof)
}
// Should only receive 2 proofs (not 5 as requested)
assert.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
})
localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 5, // Request 5 but only 2 available
AlreadyHave: []primitives.ExecutionProofId{},
}
err = service.executionProofsByRootRPCHandler(t.Context(), req, stream)
require.NoError(t, err)
if util.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}
})
}
func TestValidateExecutionProofsByRootRequest(t *testing.T) {
t.Run("invalid - count_needed is 0", func(t *testing.T) {
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
CountNeeded: 0,
AlreadyHave: []primitives.ExecutionProofId{},
}
err := validateExecutionProofsByRootRequest(req)
require.ErrorContains(t, "count_needed must be greater than 0", err)
})
t.Run("valid", func(t *testing.T) {
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: bytesutil.PadTo([]byte("blockroot"), 32),
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
err := validateExecutionProofsByRootRequest(req)
require.NoError(t, err)
})
}
func TestSendExecutionProofsByRootRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
protocolID := protocol.ID(p2p.RPCExecutionProofsByRootTopicV1) + "/" + encoder.ProtocolSuffixSSZSnappy
t.Run("count_needed is 0 - returns error", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := bytesutil.PadTo([]byte("blockroot"), 32)
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot,
CountNeeded: 0,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "count_needed must be greater than 0", err)
require.Equal(t, 0, len(proofs))
})
t.Run("success - receives requested proofs", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
// Create proofs to send back
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
proof2 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(2),
ProofData: []byte("proof2"),
}
// Setup remote to send proofs
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
// The incoming request is intentionally not read or validated in this test.
// Send proof1
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
// Send proof2
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof2))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 2,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.NoError(t, err)
require.Equal(t, 2, len(proofs))
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
assert.Equal(t, primitives.ExecutionProofId(2), proofs[1].ProofId)
assert.DeepEqual(t, blockRoot[:], proofs[0].BlockRoot)
assert.DeepEqual(t, blockRoot[:], proofs[1].BlockRoot)
})
t.Run("partial response - EOF before count_needed", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
// Setup remote to send only 1 proof (but we request 5)
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
// Send only proof1
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 5, // Request 5 but only get 1
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.NoError(t, err)
require.Equal(t, 1, len(proofs)) // Only received 1
assert.Equal(t, primitives.ExecutionProofId(1), proofs[0].ProofId)
})
t.Run("invalid block root - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
requestedRoot := [32]byte{0x01, 0x02, 0x03}
wrongRoot := [32]byte{0xFF, 0xFF, 0xFF}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
// Create proof with wrong block root
proof1 := &ethpb.ExecutionProof{
BlockRoot: wrongRoot[:], // Wrong root!
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: requestedRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "does not match requested root", err)
require.Equal(t, 0, len(proofs))
})
t.Run("already_have proof - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(1),
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{1}, // Already have proof_id 1
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "received proof we already have", err)
require.Equal(t, 0, len(proofs))
})
t.Run("invalid proof_id - validation fails", func(t *testing.T) {
localP2P := p2ptest.NewTestP2P(t)
remoteP2P := p2ptest.NewTestP2P(t)
localP2P.Connect(remoteP2P)
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
blockRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := bytesutil.PadTo([]byte("blockhash"), 32)
proof1 := &ethpb.ExecutionProof{
BlockRoot: blockRoot[:],
BlockHash: blockHash,
Slot: primitives.Slot(10),
ProofId: primitives.ExecutionProofId(255), // Invalid proof_id (max valid is 7)
ProofData: []byte("proof1"),
}
remoteP2P.BHost.SetStreamHandler(protocolID, func(stream network.Stream) {
defer func() {
_ = stream.Close()
}()
require.NoError(t, WriteExecutionProofChunk(stream, remoteP2P.Encoding(), proof1))
})
req := &ethpb.ExecutionProofsByRootRequest{
BlockRoot: blockRoot[:],
CountNeeded: 1,
AlreadyHave: []primitives.ExecutionProofId{},
}
proofs, err := SendExecutionProofsByRootRequest(t.Context(), clock, localP2P, remoteP2P.PeerID(), req)
require.ErrorContains(t, "invalid proof_id", err)
require.Equal(t, 0, len(proofs))
})
}

View File

@@ -23,6 +23,7 @@ import (
lightClient "github.com/OffchainLabs/prysm/v7/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/slashings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits"
@@ -67,6 +68,7 @@ const (
seenProposerSlashingSize = 100
badBlockSize = 1000
syncMetricsInterval = 10 * time.Second
seenExecutionProofSize = 100
)
var (
@@ -94,6 +96,7 @@ type config struct {
slashingPool slashings.PoolManager
syncCommsPool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
execProofPool execproofs.PoolManager
chain blockchainService
initialSync Checker
blockNotifier blockfeed.Notifier
@@ -235,7 +238,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
r.subHandler = newSubTopicHandler()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()
return r
}

View File

@@ -329,6 +329,17 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
getSubnetsRequiringPeers: s.allDataColumnSubnets,
})
})
if features.Get().EnableZkvm {
s.spawn(func() {
s.subscribe(
p2p.ExecutionProofSubnetTopicFormat,
s.validateExecutionProof,
s.executionProofSubscriber,
nse,
)
})
}
}
return true
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -22,6 +23,7 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
)
@@ -69,12 +71,49 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}
return err
}
// We use the service context to ensure this context is not cancelled
// when the current function returns.
// TODO: Do not broadcast proofs for blocks we have already seen.
go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
return errors.Wrap(err, "process pending atts for block")
}
return nil
}
func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
const delay = 2 * time.Second
proofTypes := flags.Get().ProofGenerationTypes
if len(proofTypes) == 0 {
return
}
var wg errgroup.Group
for _, proofType := range proofTypes {
wg.Go(func() error {
execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
if err != nil {
return fmt.Errorf("generate exec proof: %w", err)
}
if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
return fmt.Errorf("broadcast exec proof: %w", err)
}
return nil
})
}
if err := wg.Wait(); err != nil {
log.WithError(err).Error("Failed to generate and broadcast execution proofs")
}
}
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {

View File

@@ -0,0 +1,31 @@
package sync
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
executionProof, ok := msg.(*ethpb.ExecutionProof)
if !ok {
return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.ExecutionProof{}, msg)
}
// Insert the execution proof into the pool
s.cfg.execProofPool.Insert(executionProof)
// Notify subscribers about the new execution proof
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.ExecutionProofReceived,
Data: &opfeed.ExecutionProofReceivedData{
ExecutionProof: executionProof,
},
})
return nil
}

View File

@@ -0,0 +1,132 @@
package sync
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
)
func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
// Always accept our own messages.
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
// Ignore messages during initial sync.
if s.cfg.initialSync.Syncing() {
return pubsub.ValidationIgnore, nil
}
// Reject messages with a nil topic.
if msg.Topic == nil {
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}
// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
return pubsub.ValidationReject, err
}
// Reject messages that are not of the expected type.
executionProof, ok := m.(*ethpb.ExecutionProof)
if !ok {
log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
return pubsub.ValidationReject, errWrongMessage
}
// 1. Verify proof is not from the future
if err := s.proofNotFromFutureSlot(executionProof); err != nil {
return pubsub.ValidationReject, err
}
// 2. Verify proof slot is greater than finalized slot
if err := s.proofAboveFinalizedSlot(ctx, executionProof); err != nil {
return pubsub.ValidationReject, err
}
// 3. Check if the proof is already in the DA checker cache (execution proof pool)
// If it exists in the cache, we know it has already passed validation.
blockRoot := bytesutil.ToBytes32(executionProof.BlockRoot)
if s.isProofCachedInPool(blockRoot, executionProof.ProofId) {
return pubsub.ValidationIgnore, nil
}
// 4. Verify proof size limits
if uint64(len(executionProof.ProofData)) > params.BeaconConfig().MaxProofDataBytes {
return pubsub.ValidationReject, fmt.Errorf("execution proof data size %d exceeds maximum allowed %d", len(executionProof.ProofData), params.BeaconConfig().MaxProofDataBytes)
}
// 5. Run zkVM proof verification
if err := s.verifyExecutionProof(executionProof); err != nil {
return pubsub.ValidationReject, err
}
// Validation successful: hand the decoded proof to the subscriber via ValidatorData and accept.
msg.ValidatorData = executionProof
return pubsub.ValidationAccept, nil
}
// TODO: Do we need encapsulation for all those verification functions?
// proofNotFromFutureSlot checks whether the execution proof is from a future slot.
func (s *Service) proofNotFromFutureSlot(executionProof *ethpb.ExecutionProof) error {
currentSlot := s.cfg.clock.CurrentSlot()
proofSlot := executionProof.Slot
if currentSlot == proofSlot {
return nil
}
earliestStart, err := s.cfg.clock.SlotStart(proofSlot)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to compute start time for proof slot %d: %w", proofSlot, err)
}
earliestStart = earliestStart.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
// If the system time is still before earliestStart, we consider the proof from a future slot and return an error.
if s.cfg.clock.Now().Before(earliestStart) {
return fmt.Errorf("slot %d is too far in the future (current slot: %d)", proofSlot, currentSlot)
}
return nil
}
// proofAboveFinalizedSlot checks whether the execution proof's slot is after the finalized slot.
func (s *Service) proofAboveFinalizedSlot(ctx context.Context, executionProof *ethpb.ExecutionProof) error {
finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to get finalized checkpoint: %w", err)
}
fSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
// TODO: Should we penalize the peer for this?
return fmt.Errorf("failed to compute start slot for finalized epoch %d: %w", finalizedCheckpoint.Epoch, err)
}
if executionProof.Slot <= fSlot {
return fmt.Errorf("execution proof slot %d is not after finalized slot %d", executionProof.Slot, fSlot)
}
return nil
}
// isProofCachedInPool checks if the execution proof is already present in the pool.
func (s *Service) isProofCachedInPool(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
return s.cfg.execProofPool.Exists(blockRoot, proofId)
}
// verifyExecutionProof performs the actual verification of the execution proof.
func (s *Service) verifyExecutionProof(_ *ethpb.ExecutionProof) error {
// For now, treat all proofs as valid.
return nil
}

View File

@@ -0,0 +1,408 @@
package sync
import (
"bytes"
"context"
"fmt"
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
testingdb "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/execproofs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
)
func TestValidateExecutionProof(t *testing.T) {
beaconDB := testingdb.SetupDB(t)
p2pService := mockp2p.NewTestP2P(t)
ctx := context.Background()
fcp := &ethpb.Checkpoint{
Epoch: 1,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Root: params.BeaconConfig().ZeroHash[:],
Slot: 0,
}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, fcp))
defaultTopic := p2p.ExecutionProofSubnetTopicFormat + "/" + encoder.ProtocolSuffixSSZSnappy
fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E}
chainService := &mock.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
FinalizedCheckPoint: fcp,
}
currentSlot := primitives.Slot(100)
genesisTime := time.Now().Add(-time.Duration(uint64(currentSlot)*params.BeaconConfig().SecondsPerSlot) * time.Second)
tests := []struct {
name string
setupService func() *Service
proof *ethpb.ExecutionProof
topic *string
pid peer.ID
want pubsub.ValidationResult
wantErr bool
}{
{
name: "Ignore when syncing",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: true},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Reject nil topic",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: nil,
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof from future slot",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot + 1000, // Far future slot
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof below finalized slot",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: primitives.Slot(5), // Before finalized epoch 1
ProofId: 1,
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Ignore already seen proof",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Ignore proof already in pool",
setupService: func() *Service {
pool := execproofs.NewPool()
pool.Insert(&ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
})
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: pool,
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationIgnore,
wantErr: false,
},
{
name: "Reject proof if no verifier found",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Reject proof if verification fails",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationReject,
wantErr: true,
},
{
name: "Accept valid proof",
setupService: func() *Service {
s := &Service{
cfg: &config{
p2p: p2pService,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
clock: startup.NewClock(genesisTime, [32]byte{'A'}),
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB, doublylinkedtree.New()),
execProofPool: execproofs.NewPool(),
},
}
s.initCaches()
return s
},
proof: &ethpb.ExecutionProof{
Slot: currentSlot,
ProofId: primitives.ExecutionProofId(1),
BlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
ProofData: make([]byte, 100),
},
topic: func() *string {
t := fmt.Sprintf(defaultTopic, fakeDigest)
return &t
}(),
pid: "random-peer",
want: pubsub.ValidationAccept,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.setupService()
// Create pubsub message
buf := new(bytes.Buffer)
_, err := p2pService.Encoding().EncodeGossip(buf, tt.proof)
require.NoError(t, err)
msg := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
Topic: tt.topic,
},
}
// Validate
result, err := s.validateExecutionProof(ctx, tt.pid, msg)
if tt.wantErr {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, tt.want, result)
// If validation accepted, check that ValidatorData is set
if result == pubsub.ValidationAccept {
assert.NotNil(t, msg.ValidatorData)
validatedProof, ok := msg.ValidatorData.(*ethpb.ExecutionProof)
assert.Equal(t, true, ok)
// Check that the validated proof matches the original
assert.Equal(t, tt.proof.ProofId, validatedProof.ProofId)
assert.Equal(t, tt.proof.Slot, validatedProof.Slot)
assert.DeepEqual(t, tt.proof.BlockRoot, validatedProof.BlockRoot)
assert.DeepEqual(t, tt.proof.BlockHash, validatedProof.BlockHash)
assert.DeepEqual(t, tt.proof.ProofData, validatedProof.ProofData)
}
})
}
}
type alwaysFailVerifier struct{}
func (v *alwaysFailVerifier) Verify(proof *ethpb.ExecutionProof) (bool, error) {
return false, nil
}
func (v *alwaysFailVerifier) GetProofId() primitives.ExecutionProofId {
return primitives.ExecutionProofId(1)
}

View File

@@ -20,6 +20,7 @@ go_library(
"//cmd:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

View File

@@ -356,4 +356,12 @@ var (
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
}
// zkVM proof generation types
ZkvmGenerationProofTypeFlag = &cli.IntSliceFlag{
Name: "zkvm-generation-proof-types",
Usage: `
Comma-separated list of proof type IDs to generate
(e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth).
Optional - nodes can verify proofs without generating them.`,
}
)

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v7/cmd"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
@@ -27,6 +28,7 @@ type GlobalFlags struct {
DataColumnBatchLimit int
DataColumnBatchLimitBurstFactor int
StateDiffExponents []int
ProofGenerationTypes []primitives.ExecutionProofId
}
var globalConfig *GlobalFlags
@@ -84,6 +86,19 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
}
}
// zkVM Proof Generation Types
proofTypes := make([]primitives.ExecutionProofId, 0, len(ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name)))
for _, t := range ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name) {
proofTypes = append(proofTypes, primitives.ExecutionProofId(t))
}
cfg.ProofGenerationTypes = proofTypes
if features.Get().EnableZkvm {
if err := validateZkvmProofGenerationTypes(cfg.ProofGenerationTypes); err != nil {
return fmt.Errorf("validate Zkvm proof generation types: %w", err)
}
}
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
@@ -135,3 +150,13 @@ func validateStateDiffExponents(exponents []int) error {
}
return nil
}
// validateZkvmProofGenerationTypes validates the provided proof IDs.
func validateZkvmProofGenerationTypes(types []primitives.ExecutionProofId) error {
for _, t := range types {
if t >= primitives.EXECUTION_PROOF_TYPE_COUNT {
return fmt.Errorf("invalid zkvm proof generation type: %d; valid types are between 0 and %d", t, primitives.EXECUTION_PROOF_TYPE_COUNT-1)
}
}
return nil
}

View File

@@ -156,6 +156,7 @@ var appFlags = []cli.Flag{
dasFlags.BackfillOldestSlot,
dasFlags.BlobRetentionEpochFlag,
flags.BatchVerifierLimit,
flags.ZkvmGenerationProofTypeFlag,
}
func init() {

View File

@@ -231,6 +231,12 @@ var appHelpFlagGroups = []flagGroup{
flags.SetGCPercent,
},
},
{
Name: "zkvm",
Flags: []cli.Flag{
flags.ZkvmGenerationProofTypeFlag,
},
},
}
func init() {

View File

@@ -52,6 +52,7 @@ type Flags struct {
DisableDutiesV2 bool // DisableDutiesV2 sets validator client to use the get Duties endpoint
EnableWeb bool // EnableWeb enables the webui on the validator client
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
EnableZkvm bool // EnableZkvm enables zkVM related features.
// Logging related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
@@ -298,6 +299,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
}
}
if ctx.IsSet(EnableZkvmFlag.Name) {
logEnabled(EnableZkvmFlag)
cfg.EnableZkvm = true
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil

View File

@@ -211,6 +211,17 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
// Activate zkVM execution proof mode
EnableZkvmFlag = &cli.BoolFlag{
Name: "activate-zkvm",
Usage: `
Activates zkVM execution proof mode. Enables the node to subscribe to the
execution_proof gossip topic, receive and verify execution proofs from peers,
and advertise zkVM support in its ENR for peer discovery.
Use --zkvm-generation-proof-types to specify which proof types this node
should generate (optional - nodes can verify without generating).
`,
}
)
// devModeFlags holds list of flags that are set when development mode is on.
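As a concrete illustration (mirroring the kurtosis participant configs further below), a node that both verifies and generates proofs is launched with `--activate-zkvm --zkvm-generation-proof-types=0,1`, while a verify-only node passes just `--activate-zkvm`.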
@@ -272,6 +283,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
EnableZkvmFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -310,6 +310,11 @@ type BeaconChainConfig struct {
// Blobs Values
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes uint64 `yaml:"MAX_PROOF_DATA_BYTES" spec:"true"` // MaxProofDataBytes is the maximum number of bytes for execution proof data.
MinProofsRequired uint64 `yaml:"MIN_PROOFS_REQUIRED" spec:"true"` // MinProofsRequired is the minimum number of execution proofs required for a block to be considered valid.
MinEpochsForExecutionProofRequests uint64 `yaml:"MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS" spec:"true"` // MinEpochsForExecutionProofRequests is the minimum number of epochs the node will keep the execution proofs for.
// Deprecated_MaxBlobsPerBlock defines the max blobs that could exist in a block.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlock int `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"`
@@ -732,6 +737,20 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}
// WithinExecutionProofPeriod checks if the given epoch is within the execution proof retention period.
// This is used to determine whether execution proofs should be requested or generated for blocks at the given epoch.
// Returns true if the epoch is at or after the retention boundary, i.e. the later of the Fulu fork epoch and the proof retention epoch (current epoch minus MinEpochsForExecutionProofRequests).
func WithinExecutionProofPeriod(epoch, current primitives.Epoch) bool {
proofRetentionEpoch := primitives.Epoch(0)
if current >= primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests) {
proofRetentionEpoch = current - primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests)
}
boundaryEpoch := primitives.MaxEpoch(BeaconConfig().FuluForkEpoch, proofRetentionEpoch)
return epoch >= boundaryEpoch
}
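To make the boundary arithmetic concrete, here is a minimal standalone sketch of the same check with the parameters made explicit. The `fuluForkEpoch` of 100 and the epoch values are illustrative assumptions only; `minEpochs = 2` matches the mainnet `MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS` value added below.

```go
package main

import "fmt"

// withinExecutionProofPeriod mirrors WithinExecutionProofPeriod above with
// explicit parameters so the retention-boundary arithmetic is easy to follow.
func withinExecutionProofPeriod(epoch, current, fuluForkEpoch, minEpochs uint64) bool {
	proofRetentionEpoch := uint64(0)
	if current >= minEpochs {
		proofRetentionEpoch = current - minEpochs
	}
	boundaryEpoch := fuluForkEpoch
	if proofRetentionEpoch > boundaryEpoch {
		boundaryEpoch = proofRetentionEpoch
	}
	return epoch >= boundaryEpoch
}

func main() {
	// With current = 105, minEpochs = 2 and fuluForkEpoch = 100, the boundary is
	// max(100, 105-2) = 103: epoch 104 is inside the window, epoch 102 is not.
	fmt.Println(withinExecutionProofPeriod(104, 105, 100, 2)) // true
	fmt.Println(withinExecutionProofPeriod(102, 105, 100, 2)) // false
}
```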
// EpochsDuration returns the time duration of the given number of epochs.
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
return SlotsDuration(SlotsForEpochs(count, b), b)

View File

@@ -38,6 +38,7 @@ var mainnetNetworkConfig = &NetworkConfig{
AttSubnetKey: "attnets",
SyncCommsSubnetKey: "syncnets",
CustodyGroupCountKey: "cgc",
ZkvmEnabledKey: "zkvm",
MinimumPeersInSubnetSearch: 20,
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
BootstrapNodes: []string{
@@ -355,6 +356,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxBlobsPerBlock: 21,
},
},
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes: 1_048_576, // 1 MiB
MinProofsRequired: 2,
MinEpochsForExecutionProofRequests: 2,
}
// MainnetTestConfig provides a version of the mainnet config that has a different name

View File

@@ -11,6 +11,7 @@ type NetworkConfig struct {
AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield.
SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield.
CustodyGroupCountKey string // CustodyGroupsCountKey is the ENR key of the custody group count.
ZkvmEnabledKey string // ZkvmEnabledKey is the ENR key of whether zkVM mode is enabled or not.
MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search.
// Chain Network Config

View File

@@ -11,6 +11,7 @@ go_library(
"domain.go",
"epoch.go",
"execution_address.go",
"execution_proof_id.go",
"kzg.go",
"payload_id.go",
"slot.go",
@@ -36,6 +37,7 @@ go_test(
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",
"execution_proof_id_test.go",
"slot_test.go",
"sszbytes_test.go",
"sszuint64_test.go",

View File

@@ -0,0 +1,64 @@
package primitives
import (
"fmt"
fssz "github.com/prysmaticlabs/fastssz"
)
var _ fssz.HashRoot = (ExecutionProofId)(0)
var _ fssz.Marshaler = (*ExecutionProofId)(nil)
var _ fssz.Unmarshaler = (*ExecutionProofId)(nil)
// Number of execution proof types.
// Each proof type ID represents a different zkVM+EL combination.
//
// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
const EXECUTION_PROOF_TYPE_COUNT = 8
// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
type ExecutionProofId uint8
func (id *ExecutionProofId) IsValid() bool {
return uint8(*id) < EXECUTION_PROOF_TYPE_COUNT
}
// HashTreeRoot --
func (id ExecutionProofId) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(id)
}
// HashTreeRootWith --
func (id ExecutionProofId) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint8(uint8(id))
return nil
}
// UnmarshalSSZ --
func (id *ExecutionProofId) UnmarshalSSZ(buf []byte) error {
if len(buf) != id.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", id.SizeSSZ(), len(buf))
}
*id = ExecutionProofId(fssz.UnmarshallUint8(buf))
return nil
}
// MarshalSSZTo --
func (id *ExecutionProofId) MarshalSSZTo(buf []byte) ([]byte, error) {
marshalled, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
return append(buf, marshalled...), nil
}
// MarshalSSZ --
func (id *ExecutionProofId) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint8([]byte{}, uint8(*id))
return marshalled, nil
}
// SizeSSZ --
func (id *ExecutionProofId) SizeSSZ() int {
return 1
}

View File

@@ -0,0 +1,73 @@
package primitives_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
func TestExecutionProofId_IsValid(t *testing.T) {
tests := []struct {
name string
id primitives.ExecutionProofId
valid bool
}{
{
name: "valid proof id 0",
id: 0,
valid: true,
},
{
name: "valid proof id 1",
id: 1,
valid: true,
},
{
name: "valid proof id 7 (max valid)",
id: 7,
valid: true,
},
{
name: "invalid proof id 8 (at limit)",
id: 8,
valid: false,
},
{
name: "invalid proof id 255",
id: 255,
valid: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.id.IsValid(); got != tt.valid {
t.Errorf("ExecutionProofId.IsValid() = %v, want %v", got, tt.valid)
}
})
}
}
func TestExecutionProofId_Casting(t *testing.T) {
id := primitives.ExecutionProofId(5)
t.Run("uint8", func(t *testing.T) {
if uint8(id) != 5 {
t.Errorf("Casting to uint8 failed: got %v, want 5", uint8(id))
}
})
t.Run("from uint8", func(t *testing.T) {
var x uint8 = 7
if primitives.ExecutionProofId(x) != 7 {
t.Errorf("Casting from uint8 failed: got %v, want 7", primitives.ExecutionProofId(x))
}
})
t.Run("int", func(t *testing.T) {
var x = 3
if primitives.ExecutionProofId(x) != 3 {
t.Errorf("Casting from int failed: got %v, want 3", primitives.ExecutionProofId(x))
}
})
}
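The tests above exercise `IsValid` and casting but not the SSZ methods; a minimal round-trip sketch in the same style, assuming only the methods defined in `execution_proof_id.go` above and the imports of this test file, could look like this:

```go
// Sketch only: round-trips an ExecutionProofId through its 1-byte SSZ encoding.
func TestExecutionProofId_SSZRoundTrip(t *testing.T) {
	id := primitives.ExecutionProofId(3)

	buf, err := id.MarshalSSZ()
	if err != nil {
		t.Fatal(err)
	}
	if len(buf) != id.SizeSSZ() {
		t.Errorf("unexpected encoding length: got %d, want %d", len(buf), id.SizeSSZ())
	}

	var decoded primitives.ExecutionProofId
	if err := decoded.UnmarshalSSZ(buf); err != nil {
		t.Fatal(err)
	}
	if decoded != id {
		t.Errorf("round-trip mismatch: got %d, want %d", decoded, id)
	}
	if !decoded.IsValid() {
		t.Errorf("decoded id %d unexpectedly reported as invalid", decoded)
	}
}
```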

72
kurtosis/README.md Normal file
View File

@@ -0,0 +1,72 @@
# Kurtosis scripts for EIP-8025
## How to run
I slightly modified [Manu's tip](https://hackmd.io/8z4thpsyQJioaU6jj0Wazw) by adding the following aliases to my `~/.zshrc`.
```zsh
# Kurtosis Aliases
blog() {
docker logs -f "$(docker ps | grep cl-"$1"-prysm-geth | awk '{print $NF}')" 2>&1
}
vlog() {
docker logs -f "$(docker ps | grep vc-"$1"-geth-prysm | awk '{print $NF}')" 2>&1
}
dora() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/dora/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
graf() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/grafana/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
devnet () {
local args_file_path="./kurtosis/default.yaml"
if [ ! -z "$1" ]; then
args_file_path="$1"
echo "Using custom args-file path: $args_file_path"
else
echo "Using default args-file path: $args_file_path"
fi
kurtosis clean -a &&
bazel build //cmd/beacon-chain:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/beacon-chain/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/beacon-chain prysm-bn-custom-image &&
bazel build //cmd/validator:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/validator/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/validator prysm-vc-custom-image &&
kurtosis run github.com/ethpandaops/ethereum-package --args-file="$args_file_path" --verbosity brief &&
dora
}
stop() {
kurtosis clean -a
}
dps() {
docker ps --format "table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}" -a
}
```
From the project root, you can spin up a devnet with:
```bash
$ devnet
```
Or you can point it at a different network parameters YAML file:
```bash
$ devnet ./kurtosis/proof_verify.yaml
```
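Once the devnet is up, the other aliases are handy for inspecting it (assuming the ethereum-package's default container naming, e.g. `cl-1-prysm-geth`):
```bash
$ blog 1   # tail the first beacon node's logs
$ vlog 1   # tail the first validator client's logs
$ dora     # open the Dora explorer in the browser
$ graf     # open Grafana in the browser
```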
### Running scripts with local images
Prysm images are built and loaded automatically by the `devnet` command, but if you want to run a script that also uses `lighthouse`, you need to provide a local image:
#### `./kurtosis/interop.yaml`
- `lighthouse:local`: Build your own image by following [Lighthouse's guide](https://lighthouse-book.sigmaprime.io/installation_docker.html?highlight=docker#building-the-docker-image) on the [`kevaundray/kw/sel-alternative`](https://github.com/kevaundray/lighthouse/tree/kw/sel-alternative/) branch; a rough sketch of the steps is shown below.
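Roughly, and deferring to the linked guide for the exact steps, the build might look like:
```bash
$ git clone -b kw/sel-alternative https://github.com/kevaundray/lighthouse.git
$ cd lighthouse
$ docker build . -t lighthouse:local   # tag must match cl_image in ./kurtosis/interop.yaml
```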

kurtosis/default.yaml Normal file

@@ -0,0 +1,16 @@
participants:
- el_type: geth
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 4
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

kurtosis/interop.yaml Normal file

@@ -0,0 +1,38 @@
# 3 nodes (2 from Prysm, 1 from Lighthouse) generate proofs and
# 1 node only verifies
participants:
# Prysm: Proof generating nodes (nodes 1-2)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 2
# Lighthouse: Proof generating node (node 3)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: lighthouse
cl_image: lighthouse:local
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
- --target-peers=3
count: 1
# Prysm: Proof verifying only node (node 4)
- el_type: dummy
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
vc_image: prysm-vc-custom-image
count: 1
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana


@@ -0,0 +1,27 @@
# 3 nodes generate proofs, 1 node only verifies
participants:
# Proof generating nodes (nodes 1-3)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 3
# Proof verifying only node (node 4)
- el_type: dummy
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
vc_image: prysm-vc-custom-image
count: 1
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana


@@ -371,6 +371,11 @@ go_library(
"beacon_block.go",
"cloners.go",
"eip_7521.go",
"execution_proof.go",
# NOTE: ExecutionProof includes a uint8 alias type,
# which is not supported by fastssz's sszgen.
# The generated file is temporarily maintained manually.
"execution_proof.ssz.go",
"gloas.go",
"log.go",
"sync_committee_mainnet.go",
@@ -427,6 +432,7 @@ ssz_proto_files(
"beacon_state.proto",
"blobs.proto",
"data_columns.proto",
"execution_proof.proto",
"gloas.proto",
"light_client.proto",
"sync_committee.proto",


@@ -0,0 +1,18 @@
package eth
import "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
// Copy --
func (e *ExecutionProof) Copy() *ExecutionProof {
if e == nil {
return nil
}
return &ExecutionProof{
ProofId: e.ProofId,
Slot: e.Slot,
BlockHash: bytesutil.SafeCopyBytes(e.BlockHash),
BlockRoot: bytesutil.SafeCopyBytes(e.BlockRoot),
ProofData: bytesutil.SafeCopyBytes(e.ProofData),
}
}

proto/prysm/v1alpha1/execution_proof.pb.go generated Executable file

@@ -0,0 +1,268 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/prysm/v1alpha1/execution_proof.proto
package eth
import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ExecutionProof struct {
state protoimpl.MessageState `protogen:"open.v1"`
ProofId github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,1,opt,name=proof_id,json=proofId,proto3" json:"proof_id,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"`
Slot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
BlockHash []byte `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty" ssz-size:"32"`
BlockRoot []byte `protobuf:"bytes,4,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
ProofData []byte `protobuf:"bytes,5,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"1048576"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionProof) Reset() {
*x = ExecutionProof{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExecutionProof) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionProof) ProtoMessage() {}
func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExecutionProof.ProtoReflect.Descriptor instead.
func (*ExecutionProof) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
}
func (x *ExecutionProof) GetProofId() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
if x != nil {
return x.ProofId
}
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(0)
}
func (x *ExecutionProof) GetSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
if x != nil {
return x.Slot
}
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(0)
}
func (x *ExecutionProof) GetBlockHash() []byte {
if x != nil {
return x.BlockHash
}
return nil
}
func (x *ExecutionProof) GetBlockRoot() []byte {
if x != nil {
return x.BlockRoot
}
return nil
}
func (x *ExecutionProof) GetProofData() []byte {
if x != nil {
return x.ProofData
}
return nil
}
type ExecutionProofsByRootRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
CountNeeded uint64 `protobuf:"varint,2,opt,name=count_needed,json=countNeeded,proto3" json:"count_needed,omitempty"`
AlreadyHave []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,3,rep,packed,name=already_have,json=alreadyHave,proto3" json:"already_have,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId" ssz-max:"8"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionProofsByRootRequest) Reset() {
*x = ExecutionProofsByRootRequest{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExecutionProofsByRootRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionProofsByRootRequest) ProtoMessage() {}
func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExecutionProofsByRootRequest.ProtoReflect.Descriptor instead.
func (*ExecutionProofsByRootRequest) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
}
func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
if x != nil {
return x.BlockRoot
}
return nil
}
func (x *ExecutionProofsByRootRequest) GetCountNeeded() uint64 {
if x != nil {
return x.CountNeeded
}
return 0
}
func (x *ExecutionProofsByRootRequest) GetAlreadyHave() []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
if x != nil {
return x.AlreadyHave
}
return []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(nil)
}
var File_proto_prysm_v1alpha1_execution_proof_proto protoreflect.FileDescriptor
var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
0x0a, 0x2a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74,
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65,
0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
0x6f, 0x6f, 0x66, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x50, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64,
0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44,
0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73,
0x68, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62,
0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f,
0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0x92, 0xb5,
0x18, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66,
0x44, 0x61, 0x74, 0x61, 0x22, 0xe2, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72,
0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x0c,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12,
0x78, 0x0a, 0x0c, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x68, 0x61, 0x76, 0x65, 0x18,
0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x55, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x92, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x0b, 0x61, 0x6c,
0x72, 0x65, 0x61, 0x64, 0x79, 0x48, 0x61, 0x76, 0x65, 0x42, 0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72,
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68,
0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce sync.Once
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc
)
func file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP() []byte {
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce.Do(func() {
file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData)
})
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData
}
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = []any{
(*ExecutionProof)(nil), // 0: ethereum.eth.v1alpha1.ExecutionProof
(*ExecutionProofsByRootRequest)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
}
var file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_execution_proof_proto_init() }
func file_proto_prysm_v1alpha1_execution_proof_proto_init() {
if File_proto_prysm_v1alpha1_execution_proof_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_prysm_v1alpha1_execution_proof_proto_goTypes,
DependencyIndexes: file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs,
MessageInfos: file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes,
}.Build()
File_proto_prysm_v1alpha1_execution_proof_proto = out.File
file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = nil
file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = nil
file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = nil
}


@@ -0,0 +1,52 @@
syntax = "proto3";
package ethereum.eth.v1alpha1;
import "proto/eth/ext/options.proto";
option csharp_namespace = "Ethereum.Eth.v1alpha1";
option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
option java_multiple_files = true;
option java_outer_classname = "ExecutionProofProto";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";
message ExecutionProof {
// Which proof type (zkVM+EL combination) this proof belongs to
// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc.
uint64 proof_id = 1 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
];
// The slot of the beacon block this proof validates
uint64 slot = 2 [
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
// The block hash of the execution payload this proof validates
bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
// The root of the beacon block that contains the execution payload
// this proof attests to.
bytes block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
// The actual proof data
bytes proof_data = 5 [ (ethereum.eth.ext.ssz_max) = "1048576" ];
}
message ExecutionProofsByRootRequest {
// The block root we need proofs for
bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
// The number of proofs needed
uint64 count_needed = 2;
// We already have these proof IDs, so don't send them again
repeated uint64 already_have = 3 [
(ethereum.eth.ext.ssz_max) = "8",
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
];
}
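For reference, a minimal sketch (not part of this change set) of how the generated Go type for this message can be built and SSZ-encoded; the field values are placeholders and the import paths follow the `go_package` option above:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	// Placeholder 32-byte hash/root values and dummy proof bytes.
	proof := &eth.ExecutionProof{
		ProofId:   primitives.ExecutionProofId(0), // e.g. SP1+Reth
		Slot:      primitives.Slot(42),
		BlockHash: make([]byte, 32),
		BlockRoot: make([]byte, 32),
		ProofData: []byte{0xde, 0xad, 0xbe, 0xef},
	}

	enc, err := proof.MarshalSSZ()
	if err != nil {
		panic(err)
	}
	// Fixed part is 77 bytes (1 + 8 + 32 + 32 + 4-byte offset) plus the proof data.
	fmt.Println(len(enc)) // 81

	root, err := proof.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", root)
}
```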


@@ -0,0 +1,300 @@
// NOTE: This file is auto-generated by sszgen, but modified manually
// to handle the alias type ExecutionProofId which is based on uint8.
package eth
import (
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ssz "github.com/prysmaticlabs/fastssz"
)
// MarshalSSZ ssz marshals the ExecutionProof object
func (e *ExecutionProof) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProof object to a target array
func (e *ExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(77)
// Field (0) 'ProofId'
dst = ssz.MarshalUint8(dst, uint8(e.ProofId))
// Field (1) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
// Field (2) 'BlockHash'
if size := len(e.BlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
return
}
dst = append(dst, e.BlockHash...)
// Field (3) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
dst = append(dst, e.BlockRoot...)
// Offset (4) 'ProofData'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.ProofData)
// Field (4) 'ProofData'
if size := len(e.ProofData); size > 1048576 {
err = ssz.ErrBytesLengthFn("--.ProofData", size, 1048576)
return
}
dst = append(dst, e.ProofData...)
return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProof object
func (e *ExecutionProof) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 77 {
return ssz.ErrSize
}
tail := buf
var o4 uint64
// Field (0) 'ProofId'
e.ProofId = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[0:1]))
// Field (1) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[1:9]))
// Field (2) 'BlockHash'
if cap(e.BlockHash) == 0 {
e.BlockHash = make([]byte, 0, len(buf[9:41]))
}
e.BlockHash = append(e.BlockHash, buf[9:41]...)
// Field (3) 'BlockRoot'
if cap(e.BlockRoot) == 0 {
e.BlockRoot = make([]byte, 0, len(buf[41:73]))
}
e.BlockRoot = append(e.BlockRoot, buf[41:73]...)
// Offset (4) 'ProofData'
if o4 = ssz.ReadOffset(buf[73:77]); o4 > size {
return ssz.ErrOffset
}
if o4 != 77 {
return ssz.ErrInvalidVariableOffset
}
// Field (4) 'ProofData'
{
buf = tail[o4:]
if len(buf) > 1048576 {
return ssz.ErrBytesLength
}
if cap(e.ProofData) == 0 {
e.ProofData = make([]byte, 0, len(buf))
}
e.ProofData = append(e.ProofData, buf...)
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProof object
func (e *ExecutionProof) SizeSSZ() (size int) {
size = 77
// Field (4) 'ProofData'
size += len(e.ProofData)
return
}
// HashTreeRoot ssz hashes the ExecutionProof object
func (e *ExecutionProof) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProof object with a hasher
func (e *ExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'ProofId'
hh.PutUint8(uint8(e.ProofId))
// Field (1) 'Slot'
hh.PutUint64(uint64(e.Slot))
// Field (2) 'BlockHash'
if size := len(e.BlockHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
return
}
hh.PutBytes(e.BlockHash)
// Field (3) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
hh.PutBytes(e.BlockRoot)
// Field (4) 'ProofData'
{
elemIndx := hh.Index()
byteLen := uint64(len(e.ProofData))
if byteLen > 1048576 {
err = ssz.ErrIncorrectListSize
return
}
hh.PutBytes(e.ProofData)
hh.MerkleizeWithMixin(elemIndx, byteLen, (1048576+31)/32)
}
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProofsByRootRequest object to a target array
func (e *ExecutionProofsByRootRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(44)
// Field (0) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
dst = append(dst, e.BlockRoot...)
// Field (1) 'CountNeeded'
dst = ssz.MarshalUint64(dst, e.CountNeeded)
// Offset (2) 'AlreadyHave'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.AlreadyHave) * 1
// Field (2) 'AlreadyHave'
if size := len(e.AlreadyHave); size > 8 {
err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
return
}
for ii := 0; ii < len(e.AlreadyHave); ii++ {
dst = ssz.MarshalUint8(dst, uint8(e.AlreadyHave[ii]))
}
return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 44 {
return ssz.ErrSize
}
tail := buf
var o2 uint64
// Field (0) 'BlockRoot'
if cap(e.BlockRoot) == 0 {
e.BlockRoot = make([]byte, 0, len(buf[0:32]))
}
e.BlockRoot = append(e.BlockRoot, buf[0:32]...)
// Field (1) 'CountNeeded'
e.CountNeeded = ssz.UnmarshallUint64(buf[32:40])
// Offset (2) 'AlreadyHave'
if o2 = ssz.ReadOffset(buf[40:44]); o2 > size {
return ssz.ErrOffset
}
if o2 != 44 {
return ssz.ErrInvalidVariableOffset
}
// Field (2) 'AlreadyHave'
{
buf = tail[o2:]
num, err := ssz.DivideInt2(len(buf), 1, 8)
if err != nil {
return err
}
// `primitives.ExecutionProofId` is an alias of `uint8`,
// but we need to handle the conversion manually here
// to call `ssz.ExtendUint8`.
alreadyHave := make([]uint8, len(e.AlreadyHave))
for i, v := range e.AlreadyHave {
alreadyHave[i] = uint8(v)
}
alreadyHave = ssz.ExtendUint8(alreadyHave, num)
alreadyHave2 := make([]github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId, len(alreadyHave))
for i, v := range alreadyHave {
alreadyHave2[i] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(v)
}
e.AlreadyHave = alreadyHave2
for ii := range num {
e.AlreadyHave[ii] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[ii*1 : (ii+1)*1]))
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) SizeSSZ() (size int) {
size = 44
// Field (2) 'AlreadyHave'
size += len(e.AlreadyHave) * 1
return
}
// HashTreeRoot ssz hashes the ExecutionProofsByRootRequest object
func (e *ExecutionProofsByRootRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProofsByRootRequest object with a hasher
func (e *ExecutionProofsByRootRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'BlockRoot'
if size := len(e.BlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
return
}
hh.PutBytes(e.BlockRoot)
// Field (1) 'CountNeeded'
hh.PutUint64(e.CountNeeded)
// Field (2) 'AlreadyHave'
{
if size := len(e.AlreadyHave); size > 8 {
err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
return
}
subIndx := hh.Index()
for _, i := range e.AlreadyHave {
hh.AppendUint8(uint8(i))
}
hh.FillUpTo32()
numItems := uint64(len(e.AlreadyHave))
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(8, numItems, 1))
}
hh.Merkleize(indx)
return
}
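A round-trip sketch for `ExecutionProofsByRootRequest` (again, not part of the diff) that exercises the manual `AlreadyHave` handling above; the field values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	req := &eth.ExecutionProofsByRootRequest{
		BlockRoot:   make([]byte, 32),
		CountNeeded: 2,
		AlreadyHave: []primitives.ExecutionProofId{0, 1},
	}

	enc, err := req.MarshalSSZ()
	if err != nil {
		panic(err)
	}
	// Fixed part is 44 bytes (32 + 8 + 4-byte offset) plus one byte per AlreadyHave entry.
	fmt.Println(len(enc)) // 46

	var decoded eth.ExecutionProofsByRootRequest
	if err := decoded.UnmarshalSSZ(enc); err != nil {
		panic(err)
	}
	fmt.Println(decoded.CountNeeded, decoded.AlreadyHave) // 2 [0 1]
}
```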