Compare commits

...

66 Commits

Author SHA1 Message Date
Manu NALEPA
1899974ade Implement proof storage. 2026-01-28 15:11:56 +01:00
Manu NALEPA
ffdc4e67b8 Generate proofs when proposing a block (if the node is a proover) 2026-01-23 12:11:23 +01:00
Manu NALEPA
79e93d3faa Set msg.ValidatorData back in the validation function. 2026-01-23 09:17:52 +01:00
Manu NALEPA
66f63aee9c [WIP] simplify 2026-01-22 14:20:13 +01:00
Jun Song
698b6922f0 Use dummy for el_type when we want to launch zk attester node (#11) 2026-01-05 19:32:53 +09:00
Jun Song
ca228fca44 Merge branch 'develop' into poc/optional-proofs 2026-01-05 15:43:22 +09:00
Jun Song
4d6663b4de Implement exec proof service & pruning logics (#10)
* Initialize exec proof service

* Fix wrong condition for starting exec proof pool service
2025-12-26 18:35:08 +09:00
Jun Song
e713560a68 Add interop.yaml script with guide (#8) 2025-12-26 16:02:18 +09:00
Jun Song
4571e50609 Implement RPC for execution proofs & Fix broken unit tests (#9)
* Add ExecutionProofsByRootRequest struct with SSZ support

* Add skeleton for requesting execution proofs

* Check proof retention before sending the request

* Implement sendAndSaveExecutionProofs with skeleton SendExecutionProofsByRootRequest

* Nuke deprecated request alias

* Implement handler and sender without peer selection logic

* Add peer selection logic with zkvm entry key

* Fix broken tests

* Add TestZkvmEnabledPeers

* Fix stream read code for execution proof & Add unit test for handler

* Add sender test
2025-12-26 16:02:08 +09:00
Jun Song
175738919e Check whether proof generation is needed (#7)
* Check proof retention

* Check whether we already have requested execution proof or not
2025-12-24 15:57:53 +09:00
Jun Song
f1cbdc9fa6 Verify execution proofs received from gossip (#6) 2025-12-24 15:31:40 +09:00
Jun Song
156383c9c8 Merge branch 'develop' into poc/optional-proofs 2025-12-24 14:52:53 +09:00
Developer Uche
5ede7c8fe0 Merge pull request #5 from syjn99/fix/proof-gen-devnet
Skip DA check when node is able to generate proofs & Add some kurtosis scripts
2025-12-18 21:15:11 +01:00
Jun Song
3324c7b655 Add proof_verify devnet script 2025-12-16 01:01:48 +09:00
Jun Song
d477bcfa20 Add useful logs 2025-12-16 01:01:32 +09:00
Jun Song
38183471da Add default kurtosis script for proof gen devnet 2025-12-16 00:44:34 +09:00
Jun Song
3c3e2b42e9 Skip waiting for proof if it's proof generator node 2025-12-16 00:44:21 +09:00
Developer Uche
d496f7bfab Merge pull request #4 from syjn99/fix/zkvm-enr
Set zkVM ENR entry correctly if mode is enabled
2025-12-15 11:10:15 +01:00
Developer Uche
55e2663f82 Merge pull request #3 from syjn99/fix/optional-p2p
Add missing pieces regarding Gossip
2025-12-15 11:09:42 +01:00
Jun Song
5f0afd09c6 Add DA failure case 2025-12-10 17:37:04 +09:00
Jun Song
95fff68b11 Add waiting case for DA 2025-12-10 17:33:28 +09:00
Jun Song
d0bc0fcda8 Add happy case for execution proofs DA 2025-12-10 17:26:57 +09:00
Jun Song
8b2acd5f47 Add validate_execution_proof_test.go 2025-12-10 17:04:05 +09:00
Jun Song
fb071ebe20 Add execution proofs pool tests 2025-12-10 16:25:22 +09:00
Jun Song
a174d0cd53 Set zkVM entry correctly if mode is enabled 2025-12-10 16:16:16 +09:00
Jun Song
06655dcd1f Resolve build issues 2025-12-10 13:07:58 +09:00
Jun Song
c1dcf97c0c Fix mock exec proof pool 2025-12-10 12:55:32 +09:00
Jun Song
f596223096 Add blocking logic for DA in EIP-8025 2025-12-10 12:53:06 +09:00
Jun Song
a184afdfb4 Implement execution proof pool 2025-12-10 12:10:20 +09:00
Jun Song
056843bcae Register execution proof pool for sync/blockchain services 2025-12-10 12:00:32 +09:00
Jun Song
a587a9dd6e Add skeletons for pool and verifier logics 2025-12-10 11:53:08 +09:00
Jun Song
dde9dc3dd9 Mark proof as seen 2025-12-10 11:48:29 +09:00
Jun Song
960d666801 Add proof size validation 2025-12-10 11:44:22 +09:00
Jun Song
1468c20c54 Add basic validation logics for execution proof gossip 2025-12-09 23:19:56 +09:00
Jun Song
68d8988121 Use alias of BeaconBlockByRootsReq for ExecutionProofsByRoot 2025-12-09 22:37:37 +09:00
Jun Song
9ca5bf0119 Build issue with Bazel 2025-12-09 22:36:48 +09:00
Jun Song
bf8f494792 Use different gossip param weight 2025-12-09 12:57:21 +09:00
Jun Song
cab25267b5 Fix gossip subscriber match with BLSToExecutionChange 2025-12-09 12:49:33 +09:00
Jun Song
b9c23dae89 Run gazelle 2025-12-09 12:44:56 +09:00
developeruche
7944731ccf done with p2p sub-task save; SendExecutionProofByRootRequest, executionProofsByRootRPCHandler 2025-12-07 23:41:54 +01:00
developeruche
4d2a61a2e0 Merge branch 'poc/optional-proofs' of https://github.com/developeruche/prysm into poc/optional-proofs 2025-12-07 19:02:36 +01:00
developeruche
8708c198c9 gossip functionality ready save validation logic 2025-12-07 19:01:50 +01:00
Developer Uche
2857eeae6e Merge pull request #1 from syjn99/feat/dummy-proof-gen-service 2025-12-06 01:27:44 +01:00
Jun Song
4912c29d06 Generate proofs that are registered without checking 2025-12-04 18:22:09 +09:00
Jun Song
d520158510 Register dummy registry 2025-12-04 18:15:13 +09:00
Jun Song
c13d61a959 Add basic flow for proof generation 2025-12-04 18:09:08 +09:00
Jun Song
f5c61ebaea Print as hex string 2025-12-04 17:32:49 +09:00
Jun Song
ae3d465615 Add missing flag activation 2025-12-04 17:31:54 +09:00
Jun Song
f23210853d Subscribe to the block import event in proofgen service 2025-12-04 17:10:34 +09:00
developeruche
6dc49b41f2 change execution proof topic from subnet to global 2025-12-04 09:04:36 +01:00
developeruche
e56550af48 added execution proof to gossip topics 2025-12-02 06:23:44 +01:00
developeruche
20f617ecc9 enr zkvm config 2025-12-01 16:41:20 +01:00
developeruche
adb1de9caa moved proof_cache to beacon cache 2025-12-01 13:03:53 +01:00
Jun Song
2d9e6ad2c8 Add skeleton proof generation service 2025-11-29 21:35:56 +09:00
Jun Song
e8eb022145 Parse flag & Register in the global context 2025-11-29 21:24:37 +09:00
Jun Song
38be9400f1 Rename with underscores 2025-11-29 20:27:36 +09:00
Jun Song
b01e760e0a Make compatible with codebase 2025-11-29 20:25:52 +09:00
Jun Song
da4a8f1dd3 Add ExecutionProofId & ExecutionProof type 2025-11-29 19:36:42 +09:00
Jun Song
0dca170953 Merge branch 'develop' into poc/optional-proofs-2 2025-11-29 19:36:37 +09:00
developeruche
cd549abbfa added cli flags 2025-11-10 07:15:14 +01:00
developeruche
28a661518e lastest consensus-type, zkvm-execution-layer 2025-11-08 17:07:43 +01:00
developeruche
4ab5888c4c add registry proof gen/verification 2025-11-07 21:20:57 +01:00
developeruche
0d818bc687 add proof gen n verify interfaces 2025-11-07 15:47:35 +01:00
developeruche
0e90a0f2d8 add proof cache 2025-11-07 10:40:00 +01:00
developeruche
2de069d543 add config 2025-11-07 10:38:09 +01:00
developeruche
50e88045bb add consensus types 2025-11-05 10:13:19 +01:00
73 changed files with 4129 additions and 196 deletions

View File

@@ -26,6 +26,7 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"receive_proof.go",
"service.go",
"setup_forkchoice.go",
"tracked_proposer.go",
@@ -48,6 +49,7 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -226,6 +227,14 @@ func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
}
}
// WithProofStorage sets the proof storage backend for the blockchain service.
func WithProofStorage(p *filesystem.ProofStorage) Option {
return func(s *Service) error {
s.proofStorage = p
return nil
}
}
// WithSyncChecker sets the sync checker for the blockchain service.
func WithSyncChecker(checker Checker) Option {
return func(s *Service) error {
@@ -266,3 +275,10 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -113,6 +114,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
@@ -661,10 +663,17 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}
root := roBlock.Root()
blockVersion := block.Version()
root, blockVersion := roBlock.Root(), roBlock.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
if err := s.areExecutionProofsAvailable(ctx, roBlock); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}
return nil
}
if blockVersion >= version.Deneb {
@@ -674,6 +683,77 @@ func (s *Service) isDataAvailable(
return nil
}
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, roBlock consensusblocks.ROBlock) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if proof generation is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm || len(flags.Get().ProofGenerationTypes) > 0 {
return nil
}
root, slot := roBlock.Root(), roBlock.Block().Slot()
requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"requiredProofCount": requiredProofCount,
})
// Subscribe to new execution proofs as they are stored in the database.
subscription, identChan := s.proofStorage.Subscribe()
defer subscription.Unsubscribe()
// Return early if we already have enough proofs.
if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot, err := slots.StartTime(s.genesisTime, roBlock.Block().Slot()+1)
if err != nil {
return fmt.Errorf("start time: %w", err)
}
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
actualCount := uint64(s.proofStorage.Summary(root).Count())
if actualCount >= requiredProofCount {
return
}
log.WithField("proofsRetrieved", actualCount).Warning("Execution proofs still missing at slot end")
})
defer timer.Stop()
}
// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case proofIdent := <-identChan:
// Skip if the proof is for a different block.
if proofIdent.BlockRoot != root {
continue
}
// Return if we have enough proofs.
if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
@@ -810,14 +890,7 @@ func (s *Service) areDataColumnsAvailable(
}
case <-ctx.Done():
var missingIndices any = "all"
missingIndicesCount := len(missing)
if missingIndicesCount < fieldparams.NumberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, helpers.SortedPrettySliceFromMap(missing))
}
}
}

View File

@@ -66,52 +66,54 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) erro
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) spawnProcessAttestationsRoutine() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to receive genesis data")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
time.Sleep(1 * time.Second)
}
}()
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
if s.validating() {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
}
continue
}
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("Could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.

View File

@@ -59,6 +59,12 @@ type DataColumnReceiver interface {
ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
}
// ProofReceiver interface defines the methods of chain service for receiving new
// execution proofs
type ProofReceiver interface {
ReceiveProof(proof *ethpb.ExecutionProof) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)

View File

@@ -0,0 +1,15 @@
package blockchain
import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
// ReceiveProof saves an execution proof to storage.
func (s *Service) ReceiveProof(proof *ethpb.ExecutionProof) error {
if err := s.proofStorage.Save([]*ethpb.ExecutionProof{proof}); err != nil {
return errors.Wrap(err, "save proof")
}
return nil
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
@@ -66,6 +67,7 @@ type Service struct {
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
proofStorage *filesystem.ProofStorage
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
@@ -88,6 +90,7 @@ type config struct {
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
OperationNotifier operation.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
@@ -211,7 +214,8 @@ func (s *Service) Start() {
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
s.spawnProcessAttestationsRoutine()
go s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
}

View File

@@ -76,6 +76,7 @@ type ChainService struct {
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
DataColumns []blocks.VerifiedRODataColumn
Proofs []*ethpb.ExecutionProof
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
}
@@ -758,6 +759,12 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// ReceiveProof implements the same method in chain service
func (c *ChainService) ReceiveProof(proof *ethpb.ExecutionProof) error {
c.Proofs = append(c.Proofs, proof)
return nil
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -46,6 +46,9 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionProofReceived is sent after an execution proof object has been received from gossip or RPC.
ExecutionProofReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,6 +80,11 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *ethpb.ExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

View File

@@ -7,7 +7,6 @@ go_library(
"cache.go",
"data_column.go",
"data_column_cache.go",
"doc.go",
"iteration.go",
"layout.go",
"layout_by_epoch.go",
@@ -15,6 +14,8 @@ go_library(
"log.go",
"metrics.go",
"mock.go",
"proof.go",
"proof_cache.go",
"pruner.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem",
@@ -30,6 +31,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -52,6 +54,7 @@ go_test(
"iteration_test.go",
"layout_test.go",
"migration_test.go",
"proof_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,104 +0,0 @@
package filesystem
// nolint:dupword
/*
Data column sidecars storage documentation
==========================================
File organisation
-----------------
- The first byte represents the version of the file structure (up to 0xff = 255).
We set it to 0x01.
Note: This is not strictly needed, but it will help a lot if, in the future,
we want to modify the file structure.
- The next 4 bytes represents the size of a SSZ encoded data column sidecar.
(See the `Computation of the maximum size of a DataColumnSidecar` section to a description
of how this value is computed).
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (from 0 to 127) represent the index of the data column.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
Example: If the column with index 5 is in the 3th position in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
|------------------------------------------|------------------------------------------------------------------------------------|
| Byte offset | Description |
|------------------------------------------|------------------------------------------------------------------------------------|
| 0 | version (1 byte) | sszEncodedDataColumnSidecarSize (4 bytes) | indices (128 bytes) |
|133 + 0*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 1*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 2*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
| ... | ... |
|133 + 127*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|------------------------------------------|------------------------------------------------------------------------------------|
Each file is named after the block root where the data columns were data columns are committed to.
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
Database organisation
---------------------
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by the 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
Example:
data-columns
├── 0
│   ├── 3638
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   ├── ...
│   │   ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│   │   └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│   └── 3639
│      ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│      ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│      ├── ...
│      ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│      └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│   ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│   ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│   ├── ...
│   ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│   └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
   ├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
   ├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
   ├── ...
   ├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
   └── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
- index: 2 bytes (ColumnIndex)
- `column`: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 2 bytes (Slot) + 2 bytes (ValidatorIndex) + 3 * 2 bytes (Root) + 96 bytes (BLSSignature) = 106 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes
TOTAL: 8,782,060 bytes = 70,256,480 bits
log(70,256,480) / log(2) ~= 26.07
==> 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
The maximum size of an SSZ encoded data column can be 2**32 bits = 536,879,912 bytes,
which left a room of 536,879,912 bytes - 8,782,060 bytes ~= 503 mega bytes to store the extra data needed by SSZ encoding (which is more than enough.)
*/

View File

@@ -0,0 +1,197 @@
# Filesystem storage documentation
This document describes the file formats and database organization for storing data column sidecars and execution proofs.
---
# Data column sidecars
## File organisation
- The first byte represents the version of the file structure (up to `0xff = 255`).
We set it to `0x01`.
_(Note: This is not strictly needed, but it will help a lot if, in the future, we want to modify the file structure.)_
- The next 4 bytes represents the size of a SSZ encoded data column sidecar.
(See the [Computation of the maximum size of a DataColumnSidecar](#computation-of-the-maximum-size-of-a-datacolumnsidecar) section for a description
of how this value is computed).
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (from 0 to 127) represent the index of the data column.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
**Example:** If the column with index 5 is in the 3rd position in the file, then `indices[5] = 0x80 + 0x03 = 0x83`.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
### File layout
| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| sszEncodedDataColumnSidecarSize (4 bytes) \| indices (128 bytes)` |
| `133 + 0×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 1×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 2×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| ... | ... |
| `133 + 127×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
Each file is named after the block root where the data columns are committed to.
**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
## Database organisation
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
### Example directory structure
```
data-columns
├── 0
│ ├── 3638
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ ├── ...
│ │ ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│ │ └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│ └── 3639
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│ ├── ...
│ ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│ └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│ ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│ ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│ ├── ...
│ ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│ └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
├── ...
├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
└── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
```
## Computation of the maximum size of a `DataColumnSidecar`
Reference: [Ethereum Consensus Specs - Fulu DAS Core](https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar)
```python
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
```
### Size breakdown
| Field | Calculation | Size |
|-------|-------------|------|
| `index` | `ColumnIndex` (`uint64`) | `8 bytes` |
| `column` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 64 (FIELD_ELEMENTS_PER_CELL) × 32 bytes (BYTES_PER_FIELD_ELEMENT)` | `8,388,608 bytes` |
| `kzg_commitments` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGCommitment)` | `196,608 bytes` |
| `kzg_proofs` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGProof)` | `196,608 bytes` |
| `signed_block_header` | `8 bytes (Slot) + 8 bytes (ValidatorIndex) + 3 × 32 bytes (Root) + 96 bytes (BLSSignature)` | `208 bytes` |
| `kzg_commitments_inclusion_proof` | `4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) × 32 bytes` | `128 bytes` |
**TOTAL:** `8,782,168 bytes = 70,257,344 bits`
```
log(70,257,344) / log(2) ≈ 26.07
```
**Conclusion:** 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
The maximum size of an SSZ encoded data column can be `2³² bits = 536,870,912 bytes`,
which leaves a room of `536,870,912 bytes - 8,782,168 bytes ≈ 503 mebibytes` to store the extra data needed by SSZ encoding (which is more than enough).
---
# Execution proofs
## File organisation
Unlike data column sidecars (which have a fixed size per block), execution proofs have variable sizes.
To handle this, we use an offset table that stores the position and size of each proof.
- The first byte represents the version of the file structure (up to `0xff = 255`).
We set it to `0x01`.
- The next 64 bytes represent the offset table with 8 slots (one per proof type).
Each slot contains:
- 4 bytes for the offset (relative to end of header)
- 4 bytes for the size of the SSZ-encoded proof
If the size is 0, the proof is not present.
- The rest of the file contains the SSZ encoded proofs, stored contiguously.
### File layout
| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| offsetTable (64 bytes)` |
| `65 + offsetTable[0].offset` | `sszEncodedProof (offsetTable[0].size bytes)` |
| `65 + offsetTable[1].offset` | `sszEncodedProof (offsetTable[1].size bytes)` |
| ... | ... |
| `65 + offsetTable[7].offset` | `sszEncodedProof (offsetTable[7].size bytes)` |
**Header size:** 1 (version) + 64 (offset table) = **65 bytes**
### Offset table entry format
Each slot in the offset table (8 bytes per slot):
- `offset` (4 bytes, big-endian): Offset from end of header where proof data begins
- `size` (4 bytes, big-endian): Size of the SSZ-encoded proof in bytes
**Note:** Offsets are relative to the end of the header (byte 65), not the start of the file.
This maximizes the usable range of the 4-byte offset field.
### Reading a proof with `proofID=N` (O(1) access)
1. Read header (65 bytes)
2. Check slot N: if `size == 0`, proof not present
3. Seek to `(65 + offset)`, read `size` bytes, SSZ unmarshal
Each file is named after the block root.
**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
## Database Organisation
SSZ encoded execution proofs are stored following the same `by-epoch` layout as data column sidecars.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
### Example Directory Structure
```
proofs
├── 0
│ ├── 100
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ └── ...
│ └── 101
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ └── ...
└── 1
└── 4096
├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
└── ...
```

View File

@@ -70,4 +70,36 @@ var (
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
// Proofs
// proofSaveLatency tracks ProofStorage.Save wall time in milliseconds.
proofSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "proof_storage_save_latency",
	Help:    "Latency of proof storage save operations in milliseconds",
	Buckets: []float64{3, 5, 7, 9, 11, 13, 20, 50},
})
// proofFetchLatency tracks ProofStorage.Get wall time in milliseconds.
proofFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "proof_storage_get_latency",
	Help:    "Latency of proof storage get operations in milliseconds",
	Buckets: []float64{3, 5, 7, 9, 11, 13},
})
// proofPrunedCounter counts proof files removed by the pruning routine.
proofPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "proof_pruned",
	Help: "Number of proof files pruned.",
})
// proofWrittenCounter counts proof files written to storage.
proofWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "proof_written",
	Help: "Number of proof files written",
})
// proofDiskCount gauges the approximate number of proof files currently on disk.
proofDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "proof_disk_count",
	Help: "Approximate number of proof files in storage",
})
// proofFileSyncLatency tracks fsync duration during proof saves in milliseconds.
proofFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "proof_file_sync_latency",
	Help: "Latency of sync operations when saving proofs in milliseconds",
})
// proofPruneLatency tracks the duration of a full prune pass in milliseconds.
proofPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "proof_prune_latency",
	Help: "Latency of proof prune operations in milliseconds",
})
)

View File

@@ -144,3 +144,45 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}
// Proofs
// ------

// NewEphemeralProofStorage should only be used for tests.
// The returned ProofStorage is backed by an in-memory virtual filesystem with a
// warmed cache, which keeps tests fast and removes the need for disk cleanup.
func NewEphemeralProofStorage(t testing.TB, opts ...ProofStorageOption) *ProofStorage {
	memFs := afero.NewMemMapFs()
	return NewWarmedEphemeralProofStorageUsingFs(t, memFs, opts...)
}
// NewEphemeralProofStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the ProofStorage API.
func NewEphemeralProofStorageAndFs(t testing.TB, opts ...ProofStorageOption) (afero.Fs, *ProofStorage) {
	virtualFs := afero.NewMemMapFs()
	return virtualFs, NewWarmedEphemeralProofStorageUsingFs(t, virtualFs, opts...)
}
// NewEphemeralProofStorageUsingFs creates a ProofStorage backed by the provided filesystem.
// Default options (retention window, filesystem) are applied first; caller-supplied
// options come afterwards so they can override the defaults.
func NewEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
	combined := append([]ProofStorageOption{
		WithProofRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
		WithProofFs(fs),
	}, opts...)
	ps, err := NewProofStorage(context.Background(), combined...)
	if err != nil {
		t.Fatal(err)
	}
	return ps
}
// NewWarmedEphemeralProofStorageUsingFs creates a ProofStorage with a warmed cache.
func NewWarmedEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
	storage := NewEphemeralProofStorageUsingFs(t, fs, opts...)
	storage.WarmCache()
	return storage
}

View File

@@ -0,0 +1,964 @@
package filesystem
import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/async"
"github.com/OffchainLabs/prysm/v7/async/event"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/io/file"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/spf13/afero"
)
// Constants describing the on-disk layout of a proof file:
// [version (1 byte)][offset table (8 slots × 8 bytes = 64 bytes)][SSZ proofs ...].
const (
	proofVersion         = 0x01                                    // file-format version, written as the first byte of every file
	proofVersionSize     = 1                                       // bytes
	maxProofTypes        = 8                                       // ExecutionProofId max value (EXECUTION_PROOF_TYPE_COUNT)
	proofOffsetSize      = 4                                       // bytes for offset (uint32)
	proofSizeSize        = 4                                       // bytes for size (uint32)
	proofSlotSize        = proofOffsetSize + proofSizeSize         // 8 bytes per slot
	proofOffsetTableSize = maxProofTypes * proofSlotSize           // 64 bytes
	proofHeaderSize      = proofVersionSize + proofOffsetTableSize // 65 bytes
	proofsFileExtension  = "sszs"                                  // one proof file per block root
	proofPrunePeriod     = 1 * time.Minute                         // interval between automatic prune runs
)
// Sentinel errors returned by ProofStorage operations.
var (
	errProofIDTooLarge        = errors.New("proof ID too large") // proof ID >= maxProofTypes
	errWrongProofBytesWritten = errors.New("wrong number of bytes written")
	errWrongProofVersion      = errors.New("wrong proof version") // header version byte != proofVersion
	errWrongProofBytesRead    = errors.New("wrong number of bytes read")
	errNoProofBasePath        = errors.New("ProofStorage base path not specified in init")
	errProofAlreadyExists     = errors.New("proof already exists") // NOTE(review): not referenced in this file — confirm it is used elsewhere
)
type (
	// ProofIdent is a unique identifier for a proof.
	ProofIdent struct {
		BlockRoot [fieldparams.RootLength]byte // root of the block the proof belongs to
		Epoch     primitives.Epoch             // epoch of that block (determines the storage directory)
		ProofID   uint64                       // proof type identifier, expected < maxProofTypes
	}

	// ProofsIdent is a collection of unique identifiers for proofs
	// sharing the same block root and epoch.
	ProofsIdent struct {
		BlockRoot [fieldparams.RootLength]byte
		Epoch     primitives.Epoch
		ProofIDs  []uint64
	}

	// ProofStorage is the concrete implementation of the filesystem backend for saving and retrieving ExecutionProofs.
	ProofStorage struct {
		base            string           // root directory on the host filesystem (unused when fs is injected via WithProofFs)
		retentionEpochs primitives.Epoch // number of epochs proofs are kept before pruning
		fs              afero.Fs         // filesystem abstraction (in-memory in tests)
		cache           *proofCache      // in-memory index of proofs present on disk
		proofFeed       *event.Feed      // notifies subscribers of newly saved proofs
		pruneMu         sync.RWMutex     // exclusive: prune/Clear/WarmCache; shared: Save/Get/Remove
		mu              sync.Mutex       // protects muChans
		muChans         map[[fieldparams.RootLength]byte]*proofMuChan // per-block-root file lock and pending-write queue
	}

	// ProofStorageOption is a functional option for configuring a ProofStorage.
	ProofStorageOption func(*ProofStorage) error

	// proofMuChan pairs a per-root file mutex with a queue of proof batches
	// waiting to be persisted for that root (see saveFilesystem / pullProofChan).
	proofMuChan struct {
		mu      *sync.RWMutex
		toStore chan []*ethpb.ExecutionProof
	}

	// proofSlotEntry represents the offset and size for a proof in the file.
	// offset is relative to the end of the 65-byte header; size == 0 means absent.
	proofSlotEntry struct {
		offset uint32
		size   uint32
	}

	// proofOffsetTable is the offset table with 8 slots indexed by proofID.
	proofOffsetTable [maxProofTypes]proofSlotEntry

	// proofFileMetadata contains metadata extracted from a proof file path
	// of the form "<period>/<epoch>/0x<root>.sszs".
	proofFileMetadata struct {
		period    uint64
		epoch     primitives.Epoch
		blockRoot [fieldparams.RootLength]byte
	}
)
// WithProofBasePath is a required option that sets the base path of proof storage.
func WithProofBasePath(base string) ProofStorageOption {
	return func(storage *ProofStorage) error {
		storage.base = base
		return nil
	}
}
// WithProofRetentionEpochs is an option that changes the number of epochs proofs will be persisted.
func WithProofRetentionEpochs(e primitives.Epoch) ProofStorageOption {
	return func(storage *ProofStorage) error {
		storage.retentionEpochs = e
		return nil
	}
}
// WithProofFs allows the afero.Fs implementation to be customized.
// Used by tests to substitute an in-memory filesystem.
func WithProofFs(fs afero.Fs) ProofStorageOption {
	return func(storage *ProofStorage) error {
		storage.fs = fs
		return nil
	}
}
// NewProofStorage creates a new instance of the ProofStorage object.
// When no filesystem is injected (the production path), the base directory is
// created and used as the root of an OS-backed filesystem. A background prune
// loop runs every proofPrunePeriod until ctx is cancelled.
func NewProofStorage(ctx context.Context, opts ...ProofStorageOption) (*ProofStorage, error) {
	storage := &ProofStorage{
		proofFeed: new(event.Feed),
		muChans:   make(map[[fieldparams.RootLength]byte]*proofMuChan),
	}
	for _, applyOpt := range opts {
		if err := applyOpt(storage); err != nil {
			return nil, fmt.Errorf("failed to create proof storage: %w", err)
		}
	}
	if storage.fs == nil {
		// No filesystem injected (tests use WithProofFs): root one at the base path.
		if storage.base == "" {
			return nil, errNoProofBasePath
		}
		storage.base = path.Clean(storage.base)
		if err := file.MkdirAll(storage.base); err != nil {
			return nil, fmt.Errorf("failed to create proof storage at %s: %w", storage.base, err)
		}
		storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
	}
	storage.cache = newProofCache()
	// Periodic pruning under the exclusive prune lock.
	async.RunEvery(ctx, proofPrunePeriod, func() {
		storage.pruneMu.Lock()
		defer storage.pruneMu.Unlock()
		storage.prune()
	})
	return storage, nil
}
// WarmCache warms the cache of the proof filesystem.
// It walks the two-level period/epoch directory tree, indexes every proof found
// in each .sszs file, then prunes anything past retention. The exclusive prune
// lock is held for the whole walk, so concurrent Save/Get/Remove calls block
// until warm-up completes.
func (ps *ProofStorage) WarmCache() {
	start := time.Now()
	log.Info("Proof filesystem cache warm-up started")
	ps.pruneMu.Lock()
	defer ps.pruneMu.Unlock()
	// List all period directories (top level of the storage layout).
	periodFileInfos, err := afero.ReadDir(ps.fs, ".")
	if err != nil {
		log.WithError(err).Error("Error reading top directory during proof warm cache")
		return
	}
	// Iterate through periods. Errors on individual directories are logged and
	// skipped so one corrupt directory does not abort the whole warm-up.
	for _, periodFileInfo := range periodFileInfos {
		if !periodFileInfo.IsDir() {
			continue
		}
		periodPath := periodFileInfo.Name()
		// List all epoch directories in this period
		epochFileInfos, err := afero.ReadDir(ps.fs, periodPath)
		if err != nil {
			log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during proof warm cache")
			continue
		}
		// Iterate through epochs
		for _, epochFileInfo := range epochFileInfos {
			if !epochFileInfo.IsDir() {
				continue
			}
			epochPath := path.Join(periodPath, epochFileInfo.Name())
			// List all .sszs files in this epoch
			files, err := ps.listProofEpochFiles(epochPath)
			if err != nil {
				log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during proof warm cache")
				continue
			}
			// Process all files in this epoch in parallel
			ps.processProofEpochFiles(files)
		}
	}
	// Prune the cache and the filesystem (pruneMu is already held exclusively).
	ps.prune()
	totalElapsed := time.Since(start)
	log.WithField("elapsed", totalElapsed).Info("Proof filesystem cache warm-up complete")
}
// listProofEpochFiles returns the paths of every ".sszs" file directly inside
// the given epoch directory; sub-directories are ignored.
func (ps *ProofStorage) listProofEpochFiles(epochPath string) ([]string, error) {
	entries, err := afero.ReadDir(ps.fs, epochPath)
	if err != nil {
		return nil, fmt.Errorf("read epoch directory: %w", err)
	}
	suffix := "." + proofsFileExtension
	result := make([]string, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() || !strings.HasSuffix(entry.Name(), suffix) {
			continue
		}
		result = append(result, path.Join(epochPath, entry.Name()))
	}
	return result, nil
}
// processProofEpochFiles indexes all proof files of one epoch concurrently,
// returning once every file has been processed. Per-file errors are logged.
func (ps *ProofStorage) processProofEpochFiles(files []string) {
	var wg sync.WaitGroup
	for _, fp := range files {
		wg.Add(1)
		go func(p string) {
			defer wg.Done()
			if err := ps.processProofFile(p); err != nil {
				log.WithError(err).WithField("file", p).Error("Error processing proof file during warm cache")
			}
		}(fp)
	}
	wg.Wait()
}
// processProofFile indexes a single .sszs proof file for cache warming:
// it derives the block root and epoch from the path, reads the header and
// records every populated offset-table slot in the cache.
func (ps *ProofStorage) processProofFile(filePath string) error {
	meta, err := extractProofFileMetadata(filePath)
	if err != nil {
		return fmt.Errorf("extract proof file metadata: %w", err)
	}
	f, err := ps.fs.Open(filePath)
	if err != nil {
		return fmt.Errorf("open file: %w", err)
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during proof warm cache")
		}
	}()
	table, _, err := ps.readHeader(f)
	if err != nil {
		return fmt.Errorf("read header: %w", err)
	}
	// A zero size marks an absent proof; everything else goes into the cache.
	for id := range table {
		if table[id].size == 0 {
			continue
		}
		ps.cache.set(ProofIdent{
			BlockRoot: meta.blockRoot,
			Epoch:     meta.epoch,
			ProofID:   uint64(id),
		})
	}
	return nil
}
// Summary returns the ProofStorageSummary for a given root.
// Unknown roots yield the zero-value summary (no proofs).
func (ps *ProofStorage) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
	return ps.cache.Summary(root)
}
// Save saves execution proofs into the database.
// Proofs are validated and grouped by block root; each group is persisted to
// that root's file, recorded in the cache, and announced on the proof feed.
// Returns errProofIDTooLarge if any proof ID is out of range (nothing is saved
// in that case, since validation happens before any write).
func (ps *ProofStorage) Save(proofs []*ethpb.ExecutionProof) error {
	start := time.Now()
	if len(proofs) == 0 {
		return nil
	}
	// Validate and group proofs by block root.
	grouped := make(map[[fieldparams.RootLength]byte][]*ethpb.ExecutionProof)
	for _, proof := range proofs {
		if uint64(proof.ProofId) >= maxProofTypes {
			return errProofIDTooLarge
		}
		var root [fieldparams.RootLength]byte
		copy(root[:], proof.BlockRoot)
		grouped[root] = append(grouped[root], proof)
	}
	for root, group := range grouped {
		// All proofs in a group belong to the same block; derive the epoch from
		// the first one's slot.
		epoch := slots.ToEpoch(group[0].Slot)
		if err := ps.saveFilesystem(root, epoch, group); err != nil {
			return fmt.Errorf("save filesystem: %w", err)
		}
		ids := make([]uint64, 0, len(group))
		for _, proof := range group {
			ids = append(ids, uint64(proof.ProofId))
		}
		ident := ProofsIdent{BlockRoot: root, Epoch: epoch, ProofIDs: ids}
		ps.cache.setMultiple(ident)
		// Announce the newly available proofs to subscribers.
		ps.proofFeed.Send(ident)
	}
	proofSaveLatency.Observe(float64(time.Since(start).Milliseconds()))
	return nil
}
// saveFilesystem saves proofs into the database.
// This function expects all proofs to belong to the same block.
//
// Concurrency: the batch is queued on the per-root channel BEFORE taking the
// per-root file lock. The goroutine that wins the lock drains the queue (see
// pullProofChan) and persists every pending batch in one write; a goroutine
// that queued but lost the race then finds the queue empty and its own write
// degenerates to a no-op.
func (ps *ProofStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, proofs []*ethpb.ExecutionProof) error {
	// Compute the file path.
	filePath := proofFilePath(root, epoch)
	// Shared prune lock: prevents a concurrent prune/Clear from deleting
	// directories while we write.
	ps.pruneMu.RLock()
	defer ps.pruneMu.RUnlock()
	fileMu, toStore := ps.fileMutexChan(root)
	toStore <- proofs
	fileMu.Lock()
	defer fileMu.Unlock()
	// Check if the file exists.
	exists, err := afero.Exists(ps.fs, filePath)
	if err != nil {
		return fmt.Errorf("afero exists: %w", err)
	}
	if exists {
		// Append to the existing file, rewriting its offset table in place.
		if err := ps.saveProofExistingFile(filePath, toStore); err != nil {
			return fmt.Errorf("save proof existing file: %w", err)
		}
		return nil
	}
	if err := ps.saveProofNewFile(filePath, toStore); err != nil {
		return fmt.Errorf("save proof new file: %w", err)
	}
	return nil
}
// Subscribe subscribes to the proof feed.
// It returns the subscription and a 1-size buffer channel to receive proof idents.
func (ps *ProofStorage) Subscribe() (event.Subscription, <-chan ProofsIdent) {
	ch := make(chan ProofsIdent, 1)
	return ps.proofFeed.Subscribe(ch), ch
}
// Get retrieves execution proofs from the database.
// If one of the requested proofs is not found, it is just skipped.
// If proofIDs is nil, then all stored proofs are returned.
//
// Fixes vs. previous revision: the local variable `file` shadowed the imported
// io/file package (renamed to `f`), and a dead byte-count check after
// io.ReadFull was removed (ReadFull returns a non-nil error whenever it reads
// fewer than len(buf) bytes).
func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint64) ([]*ethpb.ExecutionProof, error) {
	ps.pruneMu.RLock()
	defer ps.pruneMu.RUnlock()
	fileMu, _ := ps.fileMutexChan(root)
	fileMu.RLock()
	defer fileMu.RUnlock()
	startTime := time.Now()
	// nil means "all": expand to every possible proof ID.
	if proofIDs == nil {
		proofIDs = make([]uint64, maxProofTypes)
		for i := range proofIDs {
			proofIDs[i] = uint64(i)
		}
	}
	summary, ok := ps.cache.get(root)
	if !ok {
		// Nothing found in db. Exit early without touching the disk.
		return nil, nil
	}
	// Check if any requested proofID exists before paying for file I/O.
	if !slices.ContainsFunc(proofIDs, summary.HasProof) {
		return nil, nil
	}
	// Compute the file path.
	filePath := proofFilePath(root, summary.epoch)
	f, err := ps.fs.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("proof file open: %w", err)
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			log.WithError(closeErr).WithField("file", filePath).Error("Error closing proof file")
		}
	}()
	// Read the header (version byte + offset table).
	offsetTable, _, err := ps.readHeader(f)
	if err != nil {
		return nil, fmt.Errorf("read header: %w", err)
	}
	// Retrieve the requested proofs from the file.
	proofs := make([]*ethpb.ExecutionProof, 0, len(proofIDs))
	for _, proofID := range proofIDs {
		if proofID >= maxProofTypes {
			continue
		}
		entry := offsetTable[proofID]
		// Skip if the proof is not saved (documented contract).
		if entry.size == 0 {
			continue
		}
		// Offsets in the table are relative to the end of the 65-byte header.
		if _, err := f.Seek(proofHeaderSize+int64(entry.offset), io.SeekStart); err != nil {
			return nil, fmt.Errorf("seek: %w", err)
		}
		// Read the SSZ encoded proof. io.ReadFull errors out on short reads,
		// so no separate byte-count check is needed.
		sszProof := make([]byte, entry.size)
		if _, err := io.ReadFull(f, sszProof); err != nil {
			return nil, fmt.Errorf("read proof: %w", err)
		}
		// Unmarshal the proof.
		proof := new(ethpb.ExecutionProof)
		if err := proof.UnmarshalSSZ(sszProof); err != nil {
			return nil, fmt.Errorf("unmarshal proof: %w", err)
		}
		proofs = append(proofs, proof)
	}
	proofFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	return proofs, nil
}
// Remove deletes all proofs for a given root: the cache entry is evicted and
// the backing file is removed. Unknown roots are a no-op.
func (ps *ProofStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
	ps.pruneMu.RLock()
	defer ps.pruneMu.RUnlock()
	fileMu, _ := ps.fileMutexChan(blockRoot)
	fileMu.Lock()
	defer fileMu.Unlock()
	summary, found := ps.cache.get(blockRoot)
	if !found {
		// Nothing stored for this root.
		return nil
	}
	ps.cache.evict(blockRoot)
	if err := ps.fs.Remove(proofFilePath(blockRoot, summary.epoch)); err != nil {
		return fmt.Errorf("remove: %w", err)
	}
	return nil
}
// Clear deletes all files on the filesystem and resets the in-memory cache.
// It holds the exclusive prune lock for the duration of the wipe.
func (ps *ProofStorage) Clear() error {
	ps.pruneMu.Lock()
	defer ps.pruneMu.Unlock()
	topLevel, err := listDir(ps.fs, ".")
	if err != nil {
		return fmt.Errorf("list dir: %w", err)
	}
	ps.cache.clear()
	for _, entry := range topLevel {
		if err := ps.fs.RemoveAll(entry); err != nil {
			return fmt.Errorf("remove all: %w", err)
		}
	}
	return nil
}
// saveProofNewFile drains the per-root queue and writes all pending proofs to a
// brand-new file at filePath, laid out as [version][offset table][SSZ proofs...].
// Duplicate proof IDs within the queued batches are written only once.
// If another writer already drained the queue, this becomes a no-op.
//
// Fixes vs. previous revision: local `file` shadowed the imported io/file
// package (renamed to `f`) and local `bytes` squatted on the stdlib package
// name (renamed to `content`).
func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []*ethpb.ExecutionProof) (err error) {
	// Initialize the offset table.
	var offsetTable proofOffsetTable
	var sszEncodedProofs []byte
	currentOffset := uint32(0)
	// Drain every batch currently queued for this root (see pullProofChan).
	for {
		proofs := pullProofChan(inputProofs)
		if len(proofs) == 0 {
			break
		}
		for _, proof := range proofs {
			proofID := uint64(proof.ProofId)
			if proofID >= maxProofTypes {
				continue
			}
			// Skip if already in offset table (duplicate within the queue).
			if offsetTable[proofID].size != 0 {
				continue
			}
			// SSZ encode the proof.
			sszProof, marshalErr := proof.MarshalSSZ()
			if marshalErr != nil {
				return fmt.Errorf("marshal proof SSZ: %w", marshalErr)
			}
			proofSize := uint32(len(sszProof))
			// Offsets are relative to the end of the 65-byte header.
			offsetTable[proofID] = proofSlotEntry{
				offset: currentOffset,
				size:   proofSize,
			}
			sszEncodedProofs = append(sszEncodedProofs, sszProof...)
			currentOffset += proofSize
		}
	}
	if len(sszEncodedProofs) == 0 {
		// Another writer already persisted everything that was queued.
		return nil
	}
	// Create directory structure.
	dir := filepath.Dir(filePath)
	if err := ps.fs.MkdirAll(dir, directoryPermissions()); err != nil {
		return fmt.Errorf("mkdir all: %w", err)
	}
	f, err := ps.fs.Create(filePath)
	if err != nil {
		return fmt.Errorf("create proof file: %w", err)
	}
	// Propagate a Close failure through the named return when no earlier error occurred.
	defer func() {
		if closeErr := f.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()
	// Assemble the whole file in memory, then write it with a single call.
	countToWrite := proofHeaderSize + len(sszEncodedProofs)
	content := make([]byte, 0, countToWrite)
	content = append(content, byte(proofVersion))
	content = append(content, encodeOffsetTable(offsetTable)...)
	content = append(content, sszEncodedProofs...)
	countWritten, err := f.Write(content)
	if err != nil {
		return fmt.Errorf("write: %w", err)
	}
	if countWritten != countToWrite {
		return errWrongProofBytesWritten
	}
	syncStart := time.Now()
	if err := f.Sync(); err != nil {
		return fmt.Errorf("sync: %w", err)
	}
	proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
	return nil
}
// saveProofExistingFile appends queued proofs to an existing proof file and
// rewrites the offset table in place. Slots that are already populated are
// left untouched, so re-saving an existing proof is a no-op.
//
// Fix vs. previous revision: local `file` shadowed the imported io/file
// package (renamed to `f`).
func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan []*ethpb.ExecutionProof) (err error) {
	// Open the file for read/write.
	f, err := ps.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
	if err != nil {
		return fmt.Errorf("open proof file: %w", err)
	}
	// Propagate a Close failure through the named return when no earlier error occurred.
	defer func() {
		if closeErr := f.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()
	// Read current header. fileSize = header + sum of slot sizes, which is the
	// true end of file because proofs are stored contiguously.
	offsetTable, fileSize, err := ps.readHeader(f)
	if err != nil {
		return fmt.Errorf("read header: %w", err)
	}
	var sszEncodedProofs []byte
	currentOffset := uint32(fileSize - proofHeaderSize) // offsets are relative to end of header
	modified := false
	// Drain every batch currently queued for this root (see pullProofChan).
	for {
		proofs := pullProofChan(inputProofs)
		if len(proofs) == 0 {
			break
		}
		for _, proof := range proofs {
			proofID := uint64(proof.ProofId)
			if proofID >= maxProofTypes {
				continue
			}
			// Skip if proof already exists on disk; keep the first copy.
			if offsetTable[proofID].size != 0 {
				continue
			}
			// SSZ encode the proof.
			sszProof, marshalErr := proof.MarshalSSZ()
			if marshalErr != nil {
				return fmt.Errorf("marshal proof SSZ: %w", marshalErr)
			}
			proofSize := uint32(len(sszProof))
			offsetTable[proofID] = proofSlotEntry{
				offset: currentOffset,
				size:   proofSize,
			}
			sszEncodedProofs = append(sszEncodedProofs, sszProof...)
			currentOffset += proofSize
			modified = true
		}
	}
	if !modified {
		return nil
	}
	// Write updated offset table back to file (at position 1, after the version byte).
	encodedTable := encodeOffsetTable(offsetTable)
	count, err := f.WriteAt(encodedTable, int64(proofVersionSize))
	if err != nil {
		return fmt.Errorf("write offset table: %w", err)
	}
	if count != proofOffsetTableSize {
		return errWrongProofBytesWritten
	}
	// Append the SSZ encoded proofs at the previous end of the file.
	count, err = f.WriteAt(sszEncodedProofs, fileSize)
	if err != nil {
		return fmt.Errorf("write SSZ encoded proofs: %w", err)
	}
	if count != len(sszEncodedProofs) {
		return errWrongProofBytesWritten
	}
	syncStart := time.Now()
	if err := f.Sync(); err != nil {
		return fmt.Errorf("sync: %w", err)
	}
	proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
	return nil
}
// readHeader reads and validates the 65-byte header of a proof file, returning
// the decoded offset table and the total file size it implies
// (header + sum of slot sizes; valid because proofs are stored contiguously).
//
// Fix vs. previous revision: the parameter was named `file`, shadowing the
// imported io/file package; renamed to `f` (caller-compatible in Go).
func (ps *ProofStorage) readHeader(f afero.File) (proofOffsetTable, int64, error) {
	var header [proofHeaderSize]byte
	countRead, err := f.ReadAt(header[:], 0)
	if err != nil {
		return proofOffsetTable{}, 0, fmt.Errorf("read at: %w", err)
	}
	// Defensive: io.ReaderAt guarantees a non-nil error on short reads, but an
	// afero backend violating that contract would otherwise go unnoticed.
	if countRead != proofHeaderSize {
		return proofOffsetTable{}, 0, errWrongProofBytesRead
	}
	// Check version.
	fileVersion := int(header[0])
	if fileVersion != proofVersion {
		return proofOffsetTable{}, 0, errWrongProofVersion
	}
	// Decode offset table (big-endian uint32 pairs) and compute file size.
	var offsetTable proofOffsetTable
	fileSize := int64(proofHeaderSize)
	for i := range offsetTable {
		pos := proofVersionSize + i*proofSlotSize
		offsetTable[i].offset = binary.BigEndian.Uint32(header[pos : pos+proofOffsetSize])
		offsetTable[i].size = binary.BigEndian.Uint32(header[pos+proofOffsetSize : pos+proofSlotSize])
		fileSize += int64(offsetTable[i].size)
	}
	return offsetTable, fileSize, nil
}
// prune cleans the cache, the filesystem and mutexes.
// Callers must hold pruneMu exclusively (see NewProofStorage and WarmCache).
// Retention: everything at or below highestStoredEpoch - retentionEpochs is removed.
func (ps *ProofStorage) prune() {
	startTime := time.Now()
	defer func() {
		proofPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	}()
	highestStoredEpoch := ps.cache.HighestEpoch()
	// Check if we need to prune (avoids underflow when the chain is young).
	if highestStoredEpoch < ps.retentionEpochs {
		return
	}
	highestEpochToPrune := highestStoredEpoch - ps.retentionEpochs
	highestPeriodToPrune := proofPeriod(highestEpochToPrune)
	// Prune the cache first; if nothing was cached in the window, skip disk work.
	prunedCount := ps.cache.pruneUpTo(highestEpochToPrune)
	if prunedCount == 0 {
		return
	}
	proofPrunedCounter.Add(float64(prunedCount))
	// Prune the filesystem.
	periodFileInfos, err := afero.ReadDir(ps.fs, ".")
	if err != nil {
		log.WithError(err).Error("Error encountered while reading top directory during proof prune")
		return
	}
	for _, periodFileInfo := range periodFileInfos {
		periodStr := periodFileInfo.Name()
		period, err := strconv.ParseUint(periodStr, 10, 64)
		if err != nil {
			log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
			continue
		}
		if period < highestPeriodToPrune {
			// Remove everything lower than highest period to prune.
			if err := ps.fs.RemoveAll(periodStr); err != nil {
				log.WithError(err).Error("Error encountered while removing period directory during proof prune")
			}
			continue
		}
		if period > highestPeriodToPrune {
			// Do not remove anything higher than highest period to prune.
			continue
		}
		// if period == highestPeriodToPrune: remove only the expired epochs inside it.
		epochFileInfos, err := afero.ReadDir(ps.fs, periodStr)
		if err != nil {
			log.WithError(err).Error("Error encountered while reading epoch directory during proof prune")
			continue
		}
		for _, epochFileInfo := range epochFileInfos {
			epochStr := epochFileInfo.Name()
			epochDir := path.Join(periodStr, epochStr)
			epoch, err := strconv.ParseUint(epochStr, 10, 64)
			if err != nil {
				log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
				continue
			}
			if primitives.Epoch(epoch) > highestEpochToPrune {
				continue
			}
			if err := ps.fs.RemoveAll(epochDir); err != nil {
				log.WithError(err).Error("Error encountered while removing epoch directory during proof prune")
				continue
			}
		}
	}
	ps.mu.Lock()
	defer ps.mu.Unlock()
	// Drops every per-root mutex/queue, not only those of pruned roots. This is
	// safe only because all users of these mutexes hold pruneMu.RLock, which is
	// excluded by the exclusive pruneMu our callers hold — NOTE(review): any new
	// call site of fileMutexChan must keep following that locking convention.
	clear(ps.muChans)
}
// fileMutexChan returns the file mutex and pending-write channel for a given
// block root, lazily creating them on first use. The map access is guarded by
// ps.mu. (Previous revision duplicated the return statement in both branches;
// the lookup-miss branch now falls through to a single return.)
func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []*ethpb.ExecutionProof) {
	ps.mu.Lock()
	defer ps.mu.Unlock()
	mc, ok := ps.muChans[root]
	if !ok {
		// Buffer of 1 lets the first writer queue its batch without blocking.
		mc = &proofMuChan{
			mu:      new(sync.RWMutex),
			toStore: make(chan []*ethpb.ExecutionProof, 1),
		}
		ps.muChans[root] = mc
	}
	return mc.mu, mc.toStore
}
// pullProofChan non-blockingly drains inputProofs, concatenating every queued
// batch. It returns an empty slice once the channel has no pending batches.
func pullProofChan(inputProofs chan []*ethpb.ExecutionProof) []*ethpb.ExecutionProof {
	collected := make([]*ethpb.ExecutionProof, 0, maxProofTypes)
drain:
	for {
		select {
		case batch := <-inputProofs:
			collected = append(collected, batch...)
		default:
			break drain
		}
	}
	return collected
}
// proofFilePath builds the slash-separated database path for a given root and
// epoch: "<period>/<epoch>/0x<root>.sszs".
func proofFilePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
	periodDir := strconv.FormatUint(proofPeriod(epoch), 10)
	epochDir := strconv.FormatUint(uint64(epoch), 10)
	fileName := fmt.Sprintf("%#x.%s", root, proofsFileExtension)
	return path.Join(periodDir, epochDir, fileName)
}
// extractProofFileMetadata parses a "<period>/<epoch>/0x<root>.sszs" storage path.
//
// Fixes vs. previous revision: paths in this package are always built with the
// slash-based "path" package (see proofFilePath and the warm-cache walk), so we
// split on "/" explicitly — splitting on filepath.Separator broke on Windows,
// where the separator is '\\'. The parameter was also renamed from `path`,
// which shadowed the imported "path" package.
func extractProofFileMetadata(filePath string) (*proofFileMetadata, error) {
	parts := strings.Split(filePath, "/")
	if len(parts) != 3 {
		return nil, fmt.Errorf("unexpected proof file %s", filePath)
	}
	period, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("failed to parse period from %s: %w", filePath, err)
	}
	epoch, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("failed to parse epoch from %s: %w", filePath, err)
	}
	// File name must be "0x<64 hex chars>.sszs".
	rootAndExt := strings.Split(parts[2], ".")
	if len(rootAndExt) != 2 {
		return nil, fmt.Errorf("failed to parse root from %s", filePath)
	}
	blockRootString := rootAndExt[0]
	if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
		return nil, fmt.Errorf("unexpected proof file name %s", filePath)
	}
	if rootAndExt[1] != proofsFileExtension {
		return nil, fmt.Errorf("unexpected extension %s", filePath)
	}
	blockRootSlice, err := hex.DecodeString(blockRootString[2:])
	if err != nil {
		return nil, fmt.Errorf("decode string from %s: %w", filePath, err)
	}
	var blockRoot [fieldparams.RootLength]byte
	copy(blockRoot[:], blockRootSlice)
	return &proofFileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}, nil
}
// proofPeriod computes the period of a given epoch.
// A period groups MinEpochsForDataColumnSidecarsRequest epochs and is the
// top-level directory name of the storage layout.
func proofPeriod(epoch primitives.Epoch) uint64 {
	return uint64(epoch / params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
}
// encodeOffsetTable serializes the 8-slot offset table to its 64-byte on-disk
// form: per slot, a big-endian uint32 offset followed by a big-endian uint32 size.
func encodeOffsetTable(table proofOffsetTable) []byte {
	encoded := make([]byte, proofOffsetTableSize)
	for i := range table {
		base := i * proofSlotSize
		binary.BigEndian.PutUint32(encoded[base:base+proofOffsetSize], table[i].offset)
		binary.BigEndian.PutUint32(encoded[base+proofOffsetSize:base+proofSlotSize], table[i].size)
	}
	return encoded
}

View File

@@ -0,0 +1,206 @@
package filesystem
import (
"slices"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// ProofStorageSummary represents cached information about the proofs on disk for each root the cache knows about.
// The zero value is valid and reports no proofs.
type ProofStorageSummary struct {
	epoch    primitives.Epoch // epoch of the block the proofs belong to
	proofIDs map[uint64]bool  // set of stored proof IDs; entries are always true
}
// HasProof returns true if the proof with the given proofID is available in the filesystem.
func (s ProofStorageSummary) HasProof(proofID uint64) bool {
	// Entries are only ever stored as true (see set/setMultiple), and reading
	// from a nil map yields the zero value (false), so the direct lookup
	// replaces both the nil check and the two-value comma-ok form.
	return s.proofIDs[proofID]
}
// Count returns the number of available proofs.
// len of a nil map is 0, so an empty summary reports zero proofs.
func (s ProofStorageSummary) Count() int {
	return len(s.proofIDs)
}
// All returns all stored proofIDs sorted in ascending order.
// A summary with no proofs yields nil rather than an empty slice.
func (s ProofStorageSummary) All() []uint64 {
	if s.proofIDs == nil {
		return nil
	}
	sorted := make([]uint64, 0, len(s.proofIDs))
	for proofID := range s.proofIDs {
		sorted = append(sorted, proofID)
	}
	slices.Sort(sorted)
	return sorted
}
// proofCache is a mutex-guarded in-memory index of the proofs stored on disk,
// keyed by block root.
type proofCache struct {
	mu sync.RWMutex
	// proofCount mirrors the total number of cached proofs; kept as float64
	// so it can be fed directly into the proofDiskCount gauge.
	proofCount float64
	// lowestCachedEpoch / highestCachedEpoch bound the epochs of the entries
	// currently in the cache. lowest starts at FarFutureEpoch (see newProofCache).
	lowestCachedEpoch  primitives.Epoch
	highestCachedEpoch primitives.Epoch
	cache map[[fieldparams.RootLength]byte]ProofStorageSummary
}
// newProofCache builds an empty proofCache. The lowest cached epoch is seeded
// with FarFutureEpoch so that any real epoch becomes the new minimum on first insert.
func newProofCache() *proofCache {
	pc := &proofCache{
		cache:             make(map[[fieldparams.RootLength]byte]ProofStorageSummary),
		lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
	}
	return pc
}
// Summary returns the ProofStorageSummary for `root`.
// The ProofStorageSummary can be used to check for the presence of proofs based on proofID.
// An unknown root returns the zero-value summary, whose methods report no proofs.
func (pc *proofCache) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
	pc.mu.RLock()
	defer pc.mu.RUnlock()
	return pc.cache[root]
}
// HighestEpoch returns the highest cached epoch.
// Returns the zero epoch when nothing has been cached yet.
func (pc *proofCache) HighestEpoch() primitives.Epoch {
	pc.mu.RLock()
	defer pc.mu.RUnlock()
	return pc.highestCachedEpoch
}
// set adds a proof to the cache.
// Duplicate proof IDs are ignored for counting purposes, but the summary's
// epoch is refreshed either way.
func (pc *proofCache) set(ident ProofIdent) {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	summary := pc.cache[ident.BlockRoot]
	if summary.proofIDs == nil {
		// First proof seen for this block root: lazily allocate the ID set.
		summary.proofIDs = make(map[uint64]bool)
	}
	summary.epoch = ident.Epoch
	if _, exists := summary.proofIDs[ident.ProofID]; exists {
		// Duplicate: persist the (possibly updated) epoch but leave
		// counts, epoch bounds, and metrics untouched.
		pc.cache[ident.BlockRoot] = summary
		return
	}
	summary.proofIDs[ident.ProofID] = true
	pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
	pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
	pc.cache[ident.BlockRoot] = summary
	pc.proofCount++
	proofDiskCount.Set(pc.proofCount)
	proofWrittenCounter.Inc()
}
// setMultiple adds multiple proofs to the cache.
// Only proof IDs not already tracked contribute to counters and metrics.
func (pc *proofCache) setMultiple(ident ProofsIdent) {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	summary := pc.cache[ident.BlockRoot]
	if summary.proofIDs == nil {
		// Lazily allocate the ID set on first use of this block root.
		summary.proofIDs = make(map[uint64]bool)
	}
	summary.epoch = ident.Epoch
	addedCount := 0
	for _, proofID := range ident.ProofIDs {
		if _, exists := summary.proofIDs[proofID]; exists {
			continue
		}
		summary.proofIDs[proofID] = true
		addedCount++
	}
	if addedCount == 0 {
		// Nothing new: persist the (possibly updated) epoch but leave
		// counts, epoch bounds, and metrics untouched.
		pc.cache[ident.BlockRoot] = summary
		return
	}
	pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
	pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
	pc.cache[ident.BlockRoot] = summary
	pc.proofCount += float64(addedCount)
	proofDiskCount.Set(pc.proofCount)
	proofWrittenCounter.Add(float64(addedCount))
}
// get returns the ProofStorageSummary for the given block root.
// If the root is not in the cache, the second return value will be false
// and the summary is the zero value.
func (pc *proofCache) get(blockRoot [fieldparams.RootLength]byte) (ProofStorageSummary, bool) {
	pc.mu.RLock()
	defer pc.mu.RUnlock()
	summary, ok := pc.cache[blockRoot]
	return summary, ok
}
// evict removes the ProofStorageSummary for the given block root from the cache.
// It returns the number of proofs that were tracked for the evicted root,
// and 0 when the root was not cached.
func (pc *proofCache) evict(blockRoot [fieldparams.RootLength]byte) int {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	summary, found := pc.cache[blockRoot]
	if !found {
		return 0
	}
	delete(pc.cache, blockRoot)
	removed := len(summary.proofIDs)
	if removed > 0 {
		// Keep the disk-count gauge in sync with the cached total.
		pc.proofCount -= float64(removed)
		proofDiskCount.Set(pc.proofCount)
	}
	return removed
}
// pruneUpTo removes all entries from the cache up to the given target epoch included.
// It returns the number of proofs removed.
func (pc *proofCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	prunedCount := uint64(0)
	removedEntry := false
	newLowestCachedEpoch := params.BeaconConfig().FarFutureEpoch
	newHighestCachedEpoch := primitives.Epoch(0)
	for blockRoot, summary := range pc.cache {
		if summary.epoch > targetEpoch {
			// Surviving entry: fold its epoch into the recomputed bounds.
			newLowestCachedEpoch = min(newLowestCachedEpoch, summary.epoch)
			newHighestCachedEpoch = max(newHighestCachedEpoch, summary.epoch)
			continue
		}
		// Deleting while ranging over a map is safe in Go.
		prunedCount += uint64(len(summary.proofIDs))
		delete(pc.cache, blockRoot)
		removedEntry = true
	}
	// Refresh the epoch bounds whenever at least one entry was deleted, even
	// if it tracked no proofs (setMultiple can create empty summaries);
	// otherwise the bounds could keep referencing already-pruned epochs.
	if removedEntry {
		pc.lowestCachedEpoch = newLowestCachedEpoch
		pc.highestCachedEpoch = newHighestCachedEpoch
	}
	if prunedCount > 0 {
		pc.proofCount -= float64(prunedCount)
		proofDiskCount.Set(pc.proofCount)
	}
	return prunedCount
}
// clear removes all entries from the cache and returns the number of proofs removed.
// It delegates to pruneUpTo with FarFutureEpoch so every entry's epoch qualifies.
func (pc *proofCache) clear() uint64 {
	return pc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
}

View File

@@ -0,0 +1,407 @@
package filesystem
import (
	"encoding/binary"
	"os"
	"strconv"
	"testing"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/spf13/afero"
)
// createTestProof builds a minimal ExecutionProof fixture for the given slot,
// proof ID, and block root. ProofData embeds the proof ID so fixtures with
// different IDs have distinct payloads.
func createTestProof(t *testing.T, slot primitives.Slot, proofID uint64, blockRoot [32]byte) *ethpb.ExecutionProof {
	t.Helper()
	return &ethpb.ExecutionProof{
		ProofId:   primitives.ExecutionProofId(proofID),
		Slot:      slot,
		BlockHash: make([]byte, 32),
		BlockRoot: blockRoot[:],
		// strconv renders any proofID correctly; the previous
		// string(rune('0'+proofID)) produced non-digit characters for IDs > 9.
		ProofData: []byte("test proof data for proofID " + strconv.FormatUint(proofID, 10)),
	}
}
// assertProofsEqual compares two proofs by comparing their SSZ-encoded bytes.
// SSZ encoding gives a canonical byte representation, so byte equality implies
// field-for-field equality.
func assertProofsEqual(t *testing.T, expected, actual *ethpb.ExecutionProof) {
	t.Helper()
	expectedSSZ, err := expected.MarshalSSZ()
	require.NoError(t, err)
	actualSSZ, err := actual.MarshalSSZ()
	require.NoError(t, err)
	require.DeepEqual(t, expectedSSZ, actualSSZ)
}
// TestNewProofStorage checks that construction fails without a base path
// option and succeeds (recording the base path) when one is supplied.
func TestNewProofStorage(t *testing.T) {
	ctx := t.Context()
	t.Run("No base path", func(t *testing.T) {
		_, err := NewProofStorage(ctx)
		require.ErrorIs(t, err, errNoProofBasePath)
	})
	t.Run("Nominal", func(t *testing.T) {
		dir := t.TempDir()
		storage, err := NewProofStorage(ctx, WithProofBasePath(dir))
		require.NoError(t, err)
		require.Equal(t, dir, storage.base)
	})
}
// TestProofSaveAndGet exercises the Save/Get round trip: input validation,
// empty input, single and multiple proofs (with append to an existing file),
// duplicate handling, and lookups of missing roots and proof IDs.
func TestProofSaveAndGet(t *testing.T) {
	t.Run("proof ID too large", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		proof := &ethpb.ExecutionProof{
			ProofId:   primitives.ExecutionProofId(maxProofTypes), // too large
			Slot:      1,
			BlockHash: make([]byte, 32),
			BlockRoot: make([]byte, 32),
			ProofData: []byte("test"),
		}
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.ErrorIs(t, err, errProofIDTooLarge)
	})
	t.Run("save empty slice", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		err := proofStorage.Save([]*ethpb.ExecutionProof{})
		require.NoError(t, err)
	})
	t.Run("save and get single proof", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 2, blockRoot)
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)
		// Check summary: only proofID 2 should be present.
		summary := proofStorage.Summary(blockRoot)
		require.Equal(t, true, summary.HasProof(2))
		require.Equal(t, false, summary.HasProof(0))
		require.Equal(t, false, summary.HasProof(1))
		require.Equal(t, 1, summary.Count())
		// Get the proof
		proofs, err := proofStorage.Get(blockRoot, []uint64{2})
		require.NoError(t, err)
		require.Equal(t, 1, len(proofs))
		assertProofsEqual(t, proof, proofs[0])
	})
	t.Run("save and get multiple proofs", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		// Save first proof
		proof1 := createTestProof(t, 32, 0, blockRoot)
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
		require.NoError(t, err)
		// Save second proof (should append to existing file)
		proof2 := createTestProof(t, 32, 3, blockRoot)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
		require.NoError(t, err)
		// Save third proof
		proof3 := createTestProof(t, 32, 7, blockRoot)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof3})
		require.NoError(t, err)
		// Check summary: exactly IDs 0, 3, and 7 should be present.
		summary := proofStorage.Summary(blockRoot)
		require.Equal(t, true, summary.HasProof(0))
		require.Equal(t, false, summary.HasProof(1))
		require.Equal(t, false, summary.HasProof(2))
		require.Equal(t, true, summary.HasProof(3))
		require.Equal(t, false, summary.HasProof(4))
		require.Equal(t, false, summary.HasProof(5))
		require.Equal(t, false, summary.HasProof(6))
		require.Equal(t, true, summary.HasProof(7))
		require.Equal(t, 3, summary.Count())
		// Get all proofs (nil proofIDs means "all")
		proofs, err := proofStorage.Get(blockRoot, nil)
		require.NoError(t, err)
		require.Equal(t, 3, len(proofs))
		// Get specific proofs
		proofs, err = proofStorage.Get(blockRoot, []uint64{0, 3})
		require.NoError(t, err)
		require.Equal(t, 2, len(proofs))
		assertProofsEqual(t, proof1, proofs[0])
		assertProofsEqual(t, proof2, proofs[1])
	})
	t.Run("duplicate proof is ignored", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 2, blockRoot)
		// Save first time
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)
		// Save same proof again (should be silently ignored)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)
		// Check count: still exactly one proof.
		summary := proofStorage.Summary(blockRoot)
		require.Equal(t, 1, summary.Count())
		// Get the proof
		proofs, err := proofStorage.Get(blockRoot, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(proofs))
	})
	t.Run("get non-existent root", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		proofs, err := proofStorage.Get([fieldparams.RootLength]byte{1}, []uint64{0, 1, 2})
		require.NoError(t, err)
		require.Equal(t, 0, len(proofs))
	})
	t.Run("get non-existent proofIDs", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 2, blockRoot)
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)
		// Try to get proofIDs that don't exist; missing IDs are skipped, not errors.
		proofs, err := proofStorage.Get(blockRoot, []uint64{0, 1, 3, 4})
		require.NoError(t, err)
		require.Equal(t, 0, len(proofs))
	})
}
// TestProofRemove verifies that Remove is a no-op for unknown roots and that
// removing one root's proofs leaves other roots untouched.
func TestProofRemove(t *testing.T) {
	t.Run("remove non-existent", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		err := proofStorage.Remove([fieldparams.RootLength]byte{1})
		require.NoError(t, err)
	})
	t.Run("remove existing", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
		proof1 := createTestProof(t, 32, 0, blockRoot1)
		proof2 := createTestProof(t, 64, 1, blockRoot2)
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
		require.NoError(t, err)
		err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
		require.NoError(t, err)
		// Remove first proof
		err = proofStorage.Remove(blockRoot1)
		require.NoError(t, err)
		// Check first proof is gone
		summary := proofStorage.Summary(blockRoot1)
		require.Equal(t, 0, summary.Count())
		proofs, err := proofStorage.Get(blockRoot1, nil)
		require.NoError(t, err)
		require.Equal(t, 0, len(proofs))
		// Check second proof still exists
		summary = proofStorage.Summary(blockRoot2)
		require.Equal(t, 1, summary.Count())
		proofs, err = proofStorage.Get(blockRoot2, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(proofs))
	})
}
// TestProofClear verifies that Clear wipes the proofs of every stored root.
func TestProofClear(t *testing.T) {
	_, proofStorage := NewEphemeralProofStorageAndFs(t)
	blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	proof1 := createTestProof(t, 32, 0, blockRoot1)
	proof2 := createTestProof(t, 64, 1, blockRoot2)
	err := proofStorage.Save([]*ethpb.ExecutionProof{proof1})
	require.NoError(t, err)
	err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
	require.NoError(t, err)
	// Clear all
	err = proofStorage.Clear()
	require.NoError(t, err)
	// Check both are gone
	summary := proofStorage.Summary(blockRoot1)
	require.Equal(t, 0, summary.Count())
	summary = proofStorage.Summary(blockRoot2)
	require.Equal(t, 0, summary.Count())
}
// TestProofWarmCache verifies that a fresh ProofStorage pointed at an existing
// filesystem rebuilds its in-memory cache from the on-disk proof files via WarmCache.
func TestProofWarmCache(t *testing.T) {
	fs, proofStorage := NewEphemeralProofStorageAndFs(t)
	blockRoot1 := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	blockRoot2 := [32]byte{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	// Save proofs
	proof1a := createTestProof(t, 32, 0, blockRoot1)
	proof1b := createTestProof(t, 32, 3, blockRoot1)
	proof2 := createTestProof(t, 64, 5, blockRoot2)
	err := proofStorage.Save([]*ethpb.ExecutionProof{proof1a})
	require.NoError(t, err)
	err = proofStorage.Save([]*ethpb.ExecutionProof{proof1b})
	require.NoError(t, err)
	err = proofStorage.Save([]*ethpb.ExecutionProof{proof2})
	require.NoError(t, err)
	// Verify files exist under their period/epoch directories
	// (presumably slots 32 and 64 map to epochs 1 and 2 — see proofFilePath).
	files, err := afero.ReadDir(fs, "0/1")
	require.NoError(t, err)
	require.Equal(t, 1, len(files))
	files, err = afero.ReadDir(fs, "0/2")
	require.NoError(t, err)
	require.Equal(t, 1, len(files))
	// Create a new storage with the same filesystem
	proofStorage2 := NewEphemeralProofStorageUsingFs(t, fs)
	// Before warm cache, cache should be empty
	summary := proofStorage2.Summary(blockRoot1)
	require.Equal(t, 0, summary.Count())
	// Warm cache
	proofStorage2.WarmCache()
	// After warm cache, cache should be populated
	summary = proofStorage2.Summary(blockRoot1)
	require.Equal(t, 2, summary.Count())
	require.Equal(t, true, summary.HasProof(0))
	require.Equal(t, true, summary.HasProof(3))
	summary = proofStorage2.Summary(blockRoot2)
	require.Equal(t, 1, summary.Count())
	require.Equal(t, true, summary.HasProof(5))
}
// TestProofSubscribe verifies that subscribers are notified with the block
// root, proof IDs, and epoch of a newly saved proof.
func TestProofSubscribe(t *testing.T) {
	_, proofStorage := NewEphemeralProofStorageAndFs(t)
	sub, ch := proofStorage.Subscribe()
	defer sub.Unsubscribe()
	blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	proof := createTestProof(t, 32, 2, blockRoot)
	err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
	require.NoError(t, err)
	// Should receive notification
	ident := <-ch
	require.Equal(t, blockRoot, ident.BlockRoot)
	require.DeepEqual(t, []uint64{2}, ident.ProofIDs)
	require.Equal(t, primitives.Epoch(1), ident.Epoch)
}
// TestProofReadHeader verifies that readHeader rejects a proofs file whose
// version byte has been tampered with.
func TestProofReadHeader(t *testing.T) {
	t.Run("wrong version", func(t *testing.T) {
		_, proofStorage := NewEphemeralProofStorageAndFs(t)
		blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		proof := createTestProof(t, 32, 0, blockRoot)
		err := proofStorage.Save([]*ethpb.ExecutionProof{proof})
		require.NoError(t, err)
		// Get the file path (epoch 1 presumably corresponds to slot 32).
		filePath := proofFilePath(blockRoot, 1)
		// Alter the version: the Write lands at offset 0, where the version byte lives.
		file, err := proofStorage.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
		require.NoError(t, err)
		_, err = file.Write([]byte{42}) // wrong version
		require.NoError(t, err)
		// Try to read header
		_, _, err = proofStorage.readHeader(file)
		require.ErrorIs(t, err, errWrongProofVersion)
		err = file.Close()
		require.NoError(t, err)
	})
}
// TestEncodeOffsetTable round-trips an offset table through encodeOffsetTable
// and a manual big-endian decode, checking size and content.
func TestEncodeOffsetTable(t *testing.T) {
	var table proofOffsetTable
	table[0] = proofSlotEntry{offset: 0, size: 100}
	table[3] = proofSlotEntry{offset: 100, size: 200}
	table[7] = proofSlotEntry{offset: 300, size: 300}
	encoded := encodeOffsetTable(table)
	require.Equal(t, proofOffsetTableSize, len(encoded))
	// Decode manually and verify
	var decoded proofOffsetTable
	for i := range decoded {
		pos := i * proofSlotSize
		decoded[i].offset = binary.BigEndian.Uint32(encoded[pos : pos+proofOffsetSize])
		decoded[i].size = binary.BigEndian.Uint32(encoded[pos+proofOffsetSize : pos+proofSlotSize])
	}
	require.Equal(t, table, decoded)
}
// TestProofFilePath pins the on-disk layout: <period>/<epoch>/<0x-prefixed root>.sszs.
func TestProofFilePath(t *testing.T) {
	blockRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
	epoch := primitives.Epoch(100)
	path := proofFilePath(blockRoot, epoch)
	require.Equal(t, "0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.sszs", path)
}
// TestExtractProofFileMetadata verifies parsing of proof file paths into
// (period, epoch, blockRoot) and rejection of malformed paths and extensions.
func TestExtractProofFileMetadata(t *testing.T) {
	t.Run("valid path", func(t *testing.T) {
		path := "0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.sszs"
		metadata, err := extractProofFileMetadata(path)
		require.NoError(t, err)
		expectedRoot := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
		require.Equal(t, uint64(0), metadata.period)
		require.Equal(t, primitives.Epoch(100), metadata.epoch)
		require.Equal(t, expectedRoot, metadata.blockRoot)
	})
	t.Run("invalid path - wrong number of parts", func(t *testing.T) {
		_, err := extractProofFileMetadata("invalid/path.sszs")
		require.ErrorContains(t, "unexpected proof file", err)
	})
	t.Run("invalid path - wrong extension", func(t *testing.T) {
		_, err := extractProofFileMetadata("0/100/0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20.txt")
		require.ErrorContains(t, "unexpected extension", err)
	})
}

View File

@@ -124,6 +124,8 @@ type BeaconNode struct {
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
ProofStorage *filesystem.ProofStorage
ProofStorageOptions []filesystem.ProofStorageOption
verifyInitWaiter *verification.InitializerWaiter
lhsp *verification.LazyHeadStateProvider
syncChecker *initialsync.SyncChecker
@@ -228,6 +230,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, errors.Wrap(err, "could not clear data column storage")
}
if beacon.ProofStorage == nil {
proofStorage, err := filesystem.NewProofStorage(cliCtx.Context, beacon.ProofStorageOptions...)
if err != nil {
return nil, errors.Wrap(err, "new proof storage")
}
beacon.ProofStorage = proofStorage
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
@@ -747,11 +758,13 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithSyncComplete(syncComplete),
blockchain.WithBlobStorage(b.BlobStorage),
blockchain.WithDataColumnStorage(b.DataColumnStorage),
blockchain.WithProofStorage(b.ProofStorage),
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
blockchain.WithOperationNotifier(b),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -836,6 +849,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithExecutionProofStorage(b.ProofStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
@@ -962,6 +976,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
ProofReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,

View File

@@ -35,6 +35,13 @@ func WithBuilderFlagOptions(opts []builder.Option) Option {
}
}
// WithConfigOptions appends 1 or more params.Option on the beacon node,
// to be applied to the chain configuration at startup.
func WithConfigOptions(opt ...params.Option) Option {
	return func(bn *BeaconNode) error {
		bn.ConfigOptions = append(bn.ConfigOptions, opt...)
		return nil
	}
}
// WithBlobStorage sets the BlobStorage backend for the BeaconNode
func WithBlobStorage(bs *filesystem.BlobStorage) Option {
return func(bn *BeaconNode) error {
@@ -52,13 +59,6 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
@@ -75,3 +75,20 @@ func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Opt
return nil
}
}
// WithProofStorage sets the ProofStorage backend for the BeaconNode.
func WithProofStorage(bs *filesystem.ProofStorage) Option {
	return func(bn *BeaconNode) error {
		bn.ProofStorage = bs
		return nil
	}
}
// WithProofStorageOption appends 1 or more filesystem.ProofStorageOption on the beacon node,
// to be used when initializing proof storage.
func WithProofStorageOption(opt ...filesystem.ProofStorageOption) Option {
	return func(bn *BeaconNode) error {
		bn.ProofStorageOptions = append(bn.ProofStorageOptions, opt...)
		return nil
	}
}

View File

@@ -165,6 +165,7 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -589,6 +589,11 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

View File

@@ -25,6 +25,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -243,12 +244,19 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -273,6 +281,15 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}
// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -348,6 +365,14 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)
// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}
})
}
}

View File

@@ -52,6 +52,9 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -145,6 +148,8 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -510,6 +515,28 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}
// defaultExecutionProofTopicParams returns the gossipsub scoring parameters for
// the execution proof topic. All MeshMessageDeliveries* fields are zero, so
// peers are scored only on time in mesh, first-message deliveries, and
// invalid-message penalties.
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
	return &pubsub.TopicScoreParams{
		TopicWeight:                     executionProofWeight,
		TimeInMeshWeight:                maxInMeshScore / inMeshCap(),
		TimeInMeshQuantum:               inMeshTime(),
		TimeInMeshCap:                   inMeshCap(),
		FirstMessageDeliveriesWeight:    2,
		FirstMessageDeliveriesDecay:     scoreDecay(oneHundredEpochs),
		FirstMessageDeliveriesCap:       5,
		MeshMessageDeliveriesWeight:     0,
		MeshMessageDeliveriesDecay:      0,
		MeshMessageDeliveriesCap:        0,
		MeshMessageDeliveriesThreshold:  0,
		MeshMessageDeliveriesWindow:     0,
		MeshMessageDeliveriesActivation: 0,
		MeshFailurePenaltyWeight:        0,
		MeshFailurePenaltyDecay:         0,
		InvalidMessageDeliveriesWeight:  -2000,
		InvalidMessageDeliveriesDecay:   scoreDecay(invalidDecayPeriod),
	}
}
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,

View File

@@ -25,6 +25,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -602,6 +602,33 @@ func (p *Status) All() []peer.ID {
return pids
}
// ZkvmEnabledPeers returns all connected peers that have zkvm enabled in their ENR.
func (p *Status) ZkvmEnabledPeers() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState != Connected {
continue
}
if peerData.Enr == nil {
continue
}
var enabled bool
entry := enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &enabled)
if err := peerData.Enr.Load(entry); err != nil {
continue
}
if enabled {
peers = append(peers, pid)
}
}
return peers
}
// Prune clears out and removes outdated and disconnected peers.
func (p *Status) Prune() {
p.store.Lock()

View File

@@ -1341,3 +1341,75 @@ func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr,
p.SetConnectionState(id, state)
return id
}
// TestZkvmEnabledPeers covers the full selection matrix: connected peers with
// the zkvm ENR key set to true are returned; disabled, disconnected, ENR-less,
// and key-less peers are excluded.
func TestZkvmEnabledPeers(t *testing.T) {
	p := peers.NewStatus(t.Context(), &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 1,
			},
		},
	})
	// Create peer 1: Connected, zkVM enabled
	pid1 := addPeer(t, p, peers.Connected)
	record1 := new(enr.Record)
	zkvmEnabled := true
	record1.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
	p.Add(record1, pid1, nil, network.DirOutbound)
	p.SetConnectionState(pid1, peers.Connected)
	// Create peer 2: Connected, zkVM disabled
	pid2 := addPeer(t, p, peers.Connected)
	record2 := new(enr.Record)
	zkvmDisabled := false
	record2.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmDisabled))
	p.Add(record2, pid2, nil, network.DirOutbound)
	p.SetConnectionState(pid2, peers.Connected)
	// Create peer 3: Connected, zkVM enabled
	pid3 := addPeer(t, p, peers.Connected)
	record3 := new(enr.Record)
	record3.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
	p.Add(record3, pid3, nil, network.DirOutbound)
	p.SetConnectionState(pid3, peers.Connected)
	// Create peer 4: Disconnected, zkVM enabled (should not be included)
	pid4 := addPeer(t, p, peers.Disconnected)
	record4 := new(enr.Record)
	record4.Set(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, &zkvmEnabled))
	p.Add(record4, pid4, nil, network.DirOutbound)
	p.SetConnectionState(pid4, peers.Disconnected)
	// Create peer 5: Connected, no ENR (should not be included)
	pid5 := addPeer(t, p, peers.Connected)
	p.Add(nil, pid5, nil, network.DirOutbound)
	p.SetConnectionState(pid5, peers.Connected)
	// Create peer 6: Connected, no zkVM key in ENR (should not be included)
	pid6 := addPeer(t, p, peers.Connected)
	record6 := new(enr.Record)
	record6.Set(enr.WithEntry("other_key", "other_value"))
	p.Add(record6, pid6, nil, network.DirOutbound)
	p.SetConnectionState(pid6, peers.Connected)
	// Get zkVM enabled peers
	zkvmPeers := p.ZkvmEnabledPeers()
	// Should return only pid1 and pid3 (connected peers with zkVM enabled)
	assert.Equal(t, 2, len(zkvmPeers), "Expected 2 zkVM enabled peers")
	// Verify the returned peers are correct
	zkvmPeerMap := make(map[peer.ID]bool)
	for _, pid := range zkvmPeers {
		zkvmPeerMap[pid] = true
	}
	assert.Equal(t, true, zkvmPeerMap[pid1], "pid1 should be in zkVM enabled peers")
	assert.Equal(t, true, zkvmPeerMap[pid3], "pid3 should be in zkVM enabled peers")
	assert.Equal(t, false, zkvmPeerMap[pid2], "pid2 should not be in zkVM enabled peers (disabled)")
	assert.Equal(t, false, zkvmPeerMap[pid4], "pid4 should not be in zkVM enabled peers (disconnected)")
	assert.Equal(t, false, zkvmPeerMap[pid5], "pid5 should not be in zkVM enabled peers (no ENR)")
	assert.Equal(t, false, zkvmPeerMap[pid6], "pid6 should not be in zkVM enabled peers (no zkVM key)")
}

View File

@@ -67,6 +67,9 @@ const (
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
// ExecutionProofsByRootName is the name for the ExecutionProofsByRoot v1 message topic.
ExecutionProofsByRootName = "/execution_proofs_by_root"
)
const (
@@ -106,6 +109,9 @@ const (
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// RPCExecutionProofsByRootTopicV1 is a topic for requesting execution proofs by their block root.
// /eth2/beacon_chain/req/execution_proofs_by_root/1 - New in Fulu.
RPCExecutionProofsByRootTopicV1 = protocolPrefix + ExecutionProofsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
@@ -170,6 +176,9 @@ var (
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
// ExecutionProofsByRoot v1 Message
RPCExecutionProofsByRootTopicV1: new(pb.ExecutionProofsByRootRequest),
}
// Maps all registered protocol prefixes.
@@ -193,6 +202,7 @@ var (
LightClientOptimisticUpdateName: true,
DataColumnSidecarsByRootName: true,
DataColumnSidecarsByRangeName: true,
ExecutionProofsByRootName: true,
}
// Maps all the RPC messages which are to updated in altair.

View File

@@ -36,6 +36,7 @@ var (
attSubnetEnrKey = params.BeaconNetworkConfig().AttSubnetKey
syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
zkvmEnabledKeyEnrKey = params.BeaconNetworkConfig().ZkvmEnabledKey
)
// The value used with the subnet, in order

View File

@@ -46,6 +46,8 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipExecutionProofMessage is the name for the execution proof message type.
GossipExecutionProofMessage = "execution_proof"
// Topic Formats
//
@@ -75,6 +77,8 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// ExecutionProofSubnetTopicFormat is the topic format for the execution proof subnet.
ExecutionProofSubnetTopicFormat = GossipProtocolAndDigest + GossipExecutionProofMessage // + "_%d" (PoC only have one global topic)
)
// topic is a struct representing a single gossipsub topic.
@@ -158,6 +162,7 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(fulu, future, empty, GossipExecutionProofMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}

View File

@@ -169,6 +169,11 @@ func TestGetSpec(t *testing.T) {
config.BlobsidecarSubnetCountElectra = 102
config.SyncMessageDueBPS = 103
// EIP-8025
config.MaxProofDataBytes = 200
config.MinEpochsForExecutionProofRequests = 201
config.MinProofsRequired = 202
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
config.DomainBeaconProposer = dbp
@@ -205,7 +210,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 175, len(data))
assert.Equal(t, 178, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -577,6 +582,12 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "SYNC_MESSAGE_DUE_BPS":
assert.Equal(t, "103", v)
case "MAX_PROOF_DATA_BYTES":
assert.Equal(t, "200", v)
case "MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS":
assert.Equal(t, "201", v)
case "MIN_PROOFS_REQUIRED":
assert.Equal(t, "202", v)
case "BLOB_SCHEDULE":
blobSchedule, ok := v.([]any)
assert.Equal(t, true, ok)

View File

@@ -42,6 +42,7 @@ go_library(
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/electra:go_default_library",

View File

@@ -19,6 +19,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -321,38 +322,91 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
}
var wg sync.WaitGroup
errChan := make(chan error, 1)
var wg errgroup.Group
blockBroadcastDone := make(chan bool)
wg.Add(1)
go func() {
if err := vs.broadcastReceiveBlock(ctx, &wg, block, root); err != nil {
errChan <- errors.Wrap(err, "broadcast/receive block failed")
return
wg.Go(func() error {
if err := vs.broadcastReceiveBlock(ctx, blockBroadcastDone, block, root); err != nil {
return fmt.Errorf("broadcast receive block: %w", err)
}
errChan <- nil
}()
wg.Wait()
return nil
})
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
}
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
wg.Go(func() error {
if err := vs.broadcastAndReceiveSidecars(ctx, blockBroadcastDone, block, root, blobSidecars, dataColumnSidecars); err != nil {
return fmt.Errorf("broadcast and receive sidecars: %w", err)
}
return nil
})
if err := wg.Wait(); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block/sidecars: %v", err)
}
// Generate and broadcast execution proofs.
go vs.generateAndBroadcastExecutionProofs(ctx, rob)
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
// TODO: This is a duplicate from the same function in the sync package.
// generateAndBroadcastExecutionProofs generates one execution proof per configured
// proof type for the given block, broadcasts each proof over p2p, and stores it
// via the proof receiver when one is configured. Proofs are produced concurrently;
// on any failure the aggregate error is logged and the function returns without
// reporting success.
func (vs *Server) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
	// Simulated proof generation time for the dummy prover.
	const delay = 2 * time.Second

	proofTypes := flags.Get().ProofGenerationTypes
	// Exit early if proof generation is disabled.
	if len(proofTypes) == 0 {
		return
	}

	var wg errgroup.Group
	for _, proofType := range proofTypes {
		wg.Go(func() error {
			execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
			if err != nil {
				return fmt.Errorf("generate exec proof: %w", err)
			}

			if err := vs.P2P.Broadcast(ctx, execProof); err != nil {
				return fmt.Errorf("broadcast exec proof: %w", err)
			}

			// Save the proof to storage.
			if vs.ProofReceiver != nil {
				if err := vs.ProofReceiver.ReceiveProof(execProof); err != nil {
					return fmt.Errorf("receive proof: %w", err)
				}
			}

			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		log.WithError(err).Error("Failed to generate and broadcast execution proofs")
		// At least one proof failed: do not emit the success log below.
		return
	}

	log.WithFields(logrus.Fields{
		"root":  fmt.Sprintf("%#x", roBlock.Root()),
		"slot":  roBlock.Block().Slot(),
		"count": len(proofTypes),
	}).Debug("Generated and broadcasted execution proofs")
}
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
ctx context.Context,
blockBroadcastDone <-chan bool,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
blobSidecars []*ethpb.BlobSidecar,
dataColumnSidecars []blocks.RODataColumn,
) error {
// Wait for block broadcast to complete before broadcasting sidecars.
<-blockBroadcastDone
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
@@ -434,11 +488,14 @@ func (vs *Server) handleUnblindedBlock(
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, wg, block, root); err != nil {
// It closes the blockBroadcastDone channel once broadcasting is complete (but before receiving the block).
func (vs *Server) broadcastReceiveBlock(ctx context.Context, blockBroadcastDone chan<- bool, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
if err := vs.broadcastBlock(ctx, block, root); err != nil {
return errors.Wrap(err, "broadcast block")
}
close(blockBroadcastDone)
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
@@ -451,9 +508,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, wg *sync.WaitGroup,
return nil
}
func (vs *Server) broadcastBlock(ctx context.Context, wg *sync.WaitGroup, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
defer wg.Done()
func (vs *Server) broadcastBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -650,3 +705,57 @@ func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, er
return nil, nil, errors.Errorf("unknown request type provided: %T", req)
}
}
// generateExecProof returns a dummy execution proof after the specified delay.
// The proof commits to the block's execution payload via the first four bytes
// of the payload's hash tree root.
// TODO: This is a duplicate from the same function in the sync package.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
	// Simulate proof generation work.
	time.Sleep(delay)

	// Extract the execution payload the dummy proof commits to.
	blk := roBlock.Block()
	if blk == nil {
		return nil, errors.New("nil block")
	}
	blkBody := blk.Body()
	if blkBody == nil {
		return nil, errors.New("nil block body")
	}
	payload, err := blkBody.Execution()
	if err != nil {
		return nil, fmt.Errorf("execution: %w", err)
	}
	if payload == nil {
		return nil, errors.New("nil execution data")
	}
	payloadRoot, err := payload.HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("hash tree root: %w", err)
	}

	// Deterministic dummy payload: magic byte, proof id, then the first four
	// bytes of the payload hash tree root.
	dummy := []byte{0xFF, byte(proofID), payloadRoot[0], payloadRoot[1], payloadRoot[2], payloadRoot[3]}

	root := roBlock.Root()
	return &ethpb.ExecutionProof{
		ProofId:   proofID,
		Slot:      blk.Slot(),
		BlockHash: payloadRoot[:],
		BlockRoot: root[:],
		ProofData: dummy,
	}, nil
}

View File

@@ -70,6 +70,7 @@ type Server struct {
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ProofReceiver blockchain.ProofReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

View File

@@ -90,6 +90,7 @@ type Config struct {
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ProofReceiver blockchain.ProofReceiver
ExecutionChainService execution.Chain
ChainStartFetcher execution.ChainStartFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -240,6 +241,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
DataColumnReceiver: s.cfg.DataColumnReceiver,
ProofReceiver: s.cfg.ProofReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

View File

@@ -14,6 +14,7 @@ go_library(
"decode_pubsub.go",
"doc.go",
"error.go",
"exec_proofs.go",
"fork_watcher.go",
"fuzz_exports.go", # keep
"log.go",
@@ -31,6 +32,7 @@ go_library(
"rpc_chunked_response.go",
"rpc_data_column_sidecars_by_range.go",
"rpc_data_column_sidecars_by_root.go",
"rpc_execution_proofs_by_root_topic.go",
"rpc_goodbye.go",
"rpc_light_client.go",
"rpc_metadata.go",
@@ -46,6 +48,7 @@ go_library(
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_execution_proofs.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
@@ -57,6 +60,7 @@ go_library(
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_execution_proof.go",
"validate_light_client.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",

View File

@@ -0,0 +1,65 @@
package sync
import (
"fmt"
"time"
"errors"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// generateExecProof returns a dummy execution proof after the specified delay.
// The proof commits to the block's execution payload via the first four bytes
// of the payload's hash tree root.
func generateExecProof(roBlock blocks.ROBlock, proofID primitives.ExecutionProofId, delay time.Duration) (*ethpb.ExecutionProof, error) {
	// Simulate proof generation work.
	time.Sleep(delay)

	// Extract the execution payload the dummy proof commits to.
	blk := roBlock.Block()
	if blk == nil {
		return nil, errors.New("nil block")
	}
	blkBody := blk.Body()
	if blkBody == nil {
		return nil, errors.New("nil block body")
	}
	payload, err := blkBody.Execution()
	if err != nil {
		return nil, fmt.Errorf("execution: %w", err)
	}
	if payload == nil {
		return nil, errors.New("nil execution data")
	}
	payloadRoot, err := payload.HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("hash tree root: %w", err)
	}

	// Deterministic dummy payload: magic byte, proof id, then the first four
	// bytes of the payload hash tree root.
	dummy := []byte{0xFF, byte(proofID), payloadRoot[0], payloadRoot[1], payloadRoot[2], payloadRoot[3]}

	root := roBlock.Root()
	return &ethpb.ExecutionProof{
		ProofId:   proofID,
		Slot:      blk.Slot(),
		BlockHash: payloadRoot[:],
		BlockRoot: root[:],
		ProofData: dummy,
	}, nil
}

View File

@@ -167,17 +167,25 @@ func WithStateNotifier(n statefeed.Notifier) Option {
}
// WithBlobStorage gives the sync package direct access to BlobStorage.
func WithBlobStorage(b *filesystem.BlobStorage) Option {
func WithBlobStorage(storage *filesystem.BlobStorage) Option {
return func(s *Service) error {
s.cfg.blobStorage = b
s.cfg.blobStorage = storage
return nil
}
}
// WithDataColumnStorage gives the sync package direct access to DataColumnStorage.
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
func WithDataColumnStorage(storage *filesystem.DataColumnStorage) Option {
return func(s *Service) error {
s.cfg.dataColumnStorage = b
s.cfg.dataColumnStorage = storage
return nil
}
}
// WithExecutionProofStorage gives the sync package direct access to ProofStorage.
func WithExecutionProofStorage(storage *filesystem.ProofStorage) Option {
	return func(s *Service) error {
		s.cfg.proofStorage = storage
		return nil
	}
}

View File

@@ -259,6 +259,10 @@ func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedB
return errors.Wrap(err, "request and save missing data column sidecars")
}
if err := s.requestAndSaveMissingExecutionProofs([]blocks.ROBlock{roBlock}); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return nil
}

View File

@@ -100,6 +100,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = dataColumnSidecars
// DataColumnSidecarsByRangeV1
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = dataColumnSidecars
executionProofs := leakybucket.NewCollector(1, defaultBurstLimit, leakyBucketPeriod, false /* deleteEmptyBuckets */);
// ExecutionProofsByRootV1
topicMap[addEncoding(p2p.RPCExecutionProofsByRootTopicV1)] = executionProofs
// General topic for all rpc requests.
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)

View File

@@ -17,7 +17,7 @@ import (
func TestNewRateLimiter(t *testing.T) {
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
assert.Equal(t, len(rlimiter.limiterMap), 20, "correct number of topics not registered")
assert.Equal(t, len(rlimiter.limiterMap), 21, "correct number of topics not registered")
}
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {

View File

@@ -51,6 +51,7 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
p2p.RPCExecutionProofsByRootTopicV1: s.executionProofsByRootRPCHandler, // Added in Fulu
}, nil
}

View File

@@ -11,11 +11,14 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/verify"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -87,9 +90,84 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
return errors.Wrap(err, "request and save missing data columns")
}
if err := s.requestAndSaveMissingExecutionProofs(postFuluBlocks); err != nil {
return errors.Wrap(err, "request and save missing execution proofs")
}
return err
}
// requestAndSaveMissingExecutionProofs fetches and stores execution proofs that
// are still missing for each of the given blocks. Blocks are processed
// sequentially; the first failure aborts the remainder.
func (s *Service) requestAndSaveMissingExecutionProofs(blks []blocks.ROBlock) error {
	if len(blks) == 0 {
		return nil
	}

	// TODO: Parallelize requests for multiple blocks.
	for _, blk := range blks {
		if err := s.sendAndSaveExecutionProofs(s.ctx, blk); err != nil {
			// Wrap with the block root so the failing block is identifiable,
			// matching the error-wrapping convention of this file.
			return errors.Wrapf(err, "send and save execution proofs for block %#x", blk.Root())
		}
	}

	return nil
}
// sendAndSaveExecutionProofs requests any missing execution proofs for the given
// block from a zkVM-enabled peer and persists them to the proof storage.
// It is a no-op when the zkVM feature is disabled, when the block is outside
// the proof retention period, or when enough proofs are already stored.
func (s *Service) sendAndSaveExecutionProofs(ctx context.Context, block blocks.ROBlock) error {
	if !features.Get().EnableZkvm {
		return nil
	}

	// Check proof retention period.
	blockEpoch := slots.ToEpoch(block.Block().Slot())
	currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
	if !params.WithinExecutionProofPeriod(blockEpoch, currentEpoch) {
		return nil
	}

	// Check how many proofs are needed with Execution Proof Pool.
	// TODO: All should return the same type ExecutionProofId.
	root := block.Root()
	proofStorage := s.cfg.proofStorage
	storedIds := proofStorage.Summary(root).All()

	count := uint64(len(storedIds))
	if count >= params.BeaconConfig().MinProofsRequired {
		return nil
	}

	alreadyHave := make([]primitives.ExecutionProofId, 0, len(storedIds))
	for _, id := range storedIds {
		alreadyHave = append(alreadyHave, primitives.ExecutionProofId(id))
	}

	// Construct request
	req := &ethpb.ExecutionProofsByRootRequest{
		BlockRoot:   root[:],
		CountNeeded: params.BeaconConfig().MinProofsRequired - count,
		AlreadyHave: alreadyHave,
	}

	// Call SendExecutionProofByRootRequest
	zkvmEnabledPeers := s.cfg.p2p.Peers().ZkvmEnabledPeers()
	if len(zkvmEnabledPeers) == 0 {
		// errors.New rather than fmt.Errorf: no format verbs are needed (staticcheck S1039).
		return errors.New("no zkVM enabled peers available to request execution proofs")
	}

	// TODO: For simplicity, just pick the first peer for now.
	// In the future, we can implement better peer selection logic.
	pid := zkvmEnabledPeers[0]
	proofs, err := SendExecutionProofsByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, pid, req)
	if err != nil {
		return fmt.Errorf("send execution proofs by root request: %w", err)
	}

	// Save the proofs into storage.
	if err := proofStorage.Save(proofs); err != nil {
		return fmt.Errorf("proof storage save: %w", err)
	}

	return nil
}
// requestAndSaveMissingDataColumns checks if the data columns are missing for the given block.
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {

View File

@@ -182,3 +182,21 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return nil
}
// WriteExecutionProofChunk writes a single execution proof chunk to the stream:
// a success response code, the fork-digest context bytes for the proof's slot,
// and finally the encoded proof itself.
func WriteExecutionProofChunk(stream libp2pcore.Stream, encoding encoder.NetworkEncoding, proof *ethpb.ExecutionProof) error {
	// Success response code.
	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
		return errors.Wrap(err, "stream write")
	}

	// Fork-digest context bytes derived from the proof's slot.
	digest := params.ForkDigest(slots.ToEpoch(proof.Slot))
	if err := writeContextToStream(digest[:], stream); err != nil {
		return errors.Wrap(err, "write context to stream")
	}

	// Execution proof.
	if _, err := encoding.EncodeWithMaxLength(stream, proof); err != nil {
		return errors.Wrap(err, "encode with max length")
	}

	return nil
}

View File

@@ -0,0 +1,228 @@
package sync
import (
"context"
"errors"
"fmt"
"io"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// SendExecutionProofsByRootRequest sends ExecutionProofsByRoot request and returns
// fetched execution proofs, if any. It reads at most req.CountNeeded chunks from
// the peer, stopping early on EOF, and validates each received proof against the
// request (matching root, valid id, and no duplicates — neither of ids we already
// hold nor of ids received earlier in the same response).
func SendExecutionProofsByRootRequest(
	ctx context.Context,
	clock blockchain.TemporalOracle,
	p2pProvider p2p.P2P,
	pid peer.ID,
	req *ethpb.ExecutionProofsByRootRequest,
) ([]*ethpb.ExecutionProof, error) {
	// Validate request
	if req.CountNeeded == 0 {
		return nil, errors.New("count_needed must be greater than 0")
	}

	topic, err := p2p.TopicFromMessage(p2p.ExecutionProofsByRootName, slots.ToEpoch(clock.CurrentSlot()))
	if err != nil {
		return nil, err
	}

	log.WithFields(logrus.Fields{
		"topic":      topic,
		"block_root": bytesutil.ToBytes32(req.BlockRoot),
		"count":      req.CountNeeded,
		"already":    len(req.AlreadyHave),
	}).Debug("Sending execution proofs by root request")

	stream, err := p2pProvider.Send(ctx, req, topic, pid)
	if err != nil {
		return nil, err
	}
	defer closeStream(stream, log)

	// Read execution proofs from stream
	proofs := make([]*ethpb.ExecutionProof, 0, req.CountNeeded)
	alreadyHaveSet := make(map[primitives.ExecutionProofId]struct{})
	for _, id := range req.AlreadyHave {
		alreadyHaveSet[id] = struct{}{}
	}
	for i := uint64(0); i < req.CountNeeded; i++ {
		isFirstChunk := i == 0
		proof, err := ReadChunkedExecutionProof(stream, p2pProvider, isFirstChunk)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, err
		}

		// Validate proof
		if err := validateExecutionProof(proof, req, alreadyHaveSet); err != nil {
			return nil, err
		}
		// Record the accepted id so a duplicate proof later in the same
		// response is rejected by the validation above.
		alreadyHaveSet[proof.ProofId] = struct{}{}

		proofs = append(proofs, proof)
	}

	return proofs, nil
}
// ReadChunkedExecutionProof reads a chunked execution proof from the stream.
// Each chunk carries its own status code (like data columns, not like blocks),
// followed by context bytes (a fork digest) and the encoded proof.
// NOTE(review): isFirstChunk is currently unused — presumably kept for parity
// with other chunk readers; confirm whether per-chunk handling was intended.
func ReadChunkedExecutionProof(
	stream libp2pcore.Stream,
	encoding p2p.EncodingProvider,
	isFirstChunk bool,
) (*ethpb.ExecutionProof, error) {
	code, errMsg, err := ReadStatusCode(stream, encoding.Encoding())
	if err != nil {
		return nil, err
	}
	if code != 0 {
		return nil, errors.New(errMsg)
	}

	// The fork digest is consumed to advance the stream; it is not verified here.
	if _, err := readContextFromStream(stream); err != nil {
		return nil, fmt.Errorf("read context from stream: %w", err)
	}

	proof := new(ethpb.ExecutionProof)
	if err := encoding.Encoding().DecodeWithMaxLength(stream, proof); err != nil {
		return nil, err
	}

	return proof, nil
}
// validateExecutionProof validates a received execution proof against the request.
// It checks, in order: the proof targets the requested block root, the peer did
// not resend a proof id we reported as already held, and the proof id is valid.
func validateExecutionProof(
	proof *ethpb.ExecutionProof,
	req *ethpb.ExecutionProofsByRootRequest,
	alreadyHaveSet map[primitives.ExecutionProofId]struct{},
) error {
	// The proof must be for the block root we asked about.
	gotRoot := bytesutil.ToBytes32(proof.BlockRoot)
	wantRoot := bytesutil.ToBytes32(req.BlockRoot)
	if gotRoot != wantRoot {
		return fmt.Errorf("proof block root %#x does not match requested root %#x",
			gotRoot, wantRoot)
	}

	// The peer must not resend a proof we told it we already hold.
	if _, dup := alreadyHaveSet[proof.ProofId]; dup {
		return fmt.Errorf("received proof we already have: proof_id=%d", proof.ProofId)
	}

	// The proof id must be within the supported range.
	if !proof.ProofId.IsValid() {
		return fmt.Errorf("invalid proof_id: %d", proof.ProofId)
	}

	return nil
}
// executionProofsByRootRPCHandler handles incoming ExecutionProofsByRoot RPC requests.
// It validates the request (downscoring the peer on failure), loads locally stored
// proofs for the requested block root from proof storage, filters out the ids the
// requester reports it already holds, and streams back up to req.CountNeeded proofs.
func (s *Service) executionProofsByRootRPCHandler(ctx context.Context, msg any, stream libp2pcore.Stream) error {
	ctx, span := trace.StartSpan(ctx, "sync.executionProofsByRootRPCHandler")
	defer span.End()
	// NOTE(review): the timeout-derived context is discarded (`_`), so ttfbTimeout
	// is never actually applied to this handler; only cancel is retained.
	// Confirm whether the derived context was meant to be used below.
	_, cancel := context.WithTimeout(ctx, ttfbTimeout)
	defer cancel()
	req, ok := msg.(*ethpb.ExecutionProofsByRootRequest)
	if !ok {
		return errors.New("message is not type ExecutionProofsByRootRequest")
	}
	remotePeer := stream.Conn().RemotePeer()
	SetRPCStreamDeadlines(stream)
	// Validate request
	if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
		return err
	}
	// Penalize peers that send invalid requests.
	if err := validateExecutionProofsByRootRequest(req); err != nil {
		s.downscorePeer(remotePeer, "executionProofsByRootRPCHandlerValidationError")
		s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
		return fmt.Errorf("validate execution proofs by root request: %w", err)
	}
	blockRoot := bytesutil.ToBytes32(req.BlockRoot)
	log := log.WithFields(logrus.Fields{
		"blockroot":   fmt.Sprintf("%#x", blockRoot),
		"neededCount": req.CountNeeded,
		"alreadyHave": req.AlreadyHave,
		"peer":        remotePeer.String(),
	})
	s.rateLimiter.add(stream, 1)
	defer closeStream(stream, log)
	// Get proofs from execution proof pool
	summary := s.cfg.proofStorage.Summary(blockRoot)
	// Filter out not requested proofs
	alreadyHave := make(map[primitives.ExecutionProofId]bool)
	for _, id := range req.AlreadyHave {
		alreadyHave[id] = true
	}
	// Determine which proofs to fetch (not already had by requester)
	proofIDsToFetch := make([]uint64, 0, len(summary.All()))
	for _, proofId := range summary.All() {
		if !alreadyHave[primitives.ExecutionProofId(proofId)] {
			proofIDsToFetch = append(proofIDsToFetch, proofId)
		}
	}
	// Load all proofs at once
	proofs, err := s.cfg.proofStorage.Get(blockRoot, proofIDsToFetch)
	if err != nil {
		return fmt.Errorf("proof storage get: %w", err)
	}
	// Send proofs
	sentCount := uint64(0)
	for _, proof := range proofs {
		// Stop once the requester's quota is satisfied.
		if sentCount >= req.CountNeeded {
			break
		}
		// Write proof to stream
		SetStreamWriteDeadline(stream, defaultWriteDuration)
		if err := WriteExecutionProofChunk(stream, s.cfg.p2p.Encoding(), proof); err != nil {
			log.WithError(err).Debug("Could not send execution proof")
			s.writeErrorResponseToStream(responseCodeServerError, "could not send execution proof", stream)
			return err
		}
		sentCount++
	}
	log.WithField("sentCount", sentCount).Debug("Responded to execution proofs by root request")
	return nil
}
// validateExecutionProofsByRootRequest performs basic sanity checks on an
// incoming ExecutionProofsByRoot request before it is served.
func validateExecutionProofsByRootRequest(req *ethpb.ExecutionProofsByRootRequest) error {
	if req.CountNeeded == 0 {
		return errors.New("count_needed must be greater than 0")
	}
	// A block root must be exactly 32 bytes; anything else would be silently
	// truncated or zero-padded by bytesutil.ToBytes32 in the handler.
	if len(req.BlockRoot) != 32 {
		return fmt.Errorf("block_root must be 32 bytes, got %d", len(req.BlockRoot))
	}
	return nil
}

View File

@@ -67,6 +67,7 @@ const (
seenProposerSlashingSize = 100
badBlockSize = 1000
syncMetricsInterval = 10 * time.Second
seenExecutionProofSize = 100
)
var (
@@ -106,6 +107,7 @@ type config struct {
stateNotifier statefeed.Notifier
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
proofStorage *filesystem.ProofStorage
batchVerifierLimit int
}
@@ -114,6 +116,7 @@ type blockchainService interface {
blockchain.BlockReceiver
blockchain.BlobReceiver
blockchain.DataColumnReceiver
blockchain.ProofReceiver
blockchain.HeadFetcher
blockchain.FinalizationFetcher
blockchain.ForkFetcher
@@ -146,6 +149,7 @@ type Service struct {
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnCache *slotAwareCache
seenProofCache *slotAwareCache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
seenUnAggregatedAttestationLock sync.RWMutex
@@ -235,7 +239,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
r.subHandler = newSubTopicHandler()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()
return r
}
@@ -346,6 +349,7 @@ func (s *Service) initCaches() {
s.seenBlockCache = lruwrpr.New(seenBlockSize)
s.seenBlobCache = lruwrpr.New(seenBlockSize * params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra)
s.seenDataColumnCache = newSlotAwareCache(seenDataColumnSize)
s.seenProofCache = newSlotAwareCache(seenExecutionProofSize)
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)

View File

@@ -329,6 +329,17 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
getSubnetsRequiringPeers: s.allDataColumnSubnets,
})
})
if features.Get().EnableZkvm {
s.spawn(func() {
s.subscribe(
p2p.ExecutionProofSubnetTopicFormat,
s.validateExecutionProof,
s.executionProofSubscriber,
nse,
)
})
}
}
return true
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -22,6 +23,7 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
)
@@ -69,12 +71,60 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}
return err
}
// We use the service context to ensure this context is not cancelled
// when the current function returns.
// TODO: Do not broadcast proofs for blocks we have already seen.
go s.generateAndBroadcastExecutionProofs(s.ctx, roBlock)
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
return errors.Wrap(err, "process pending atts for block")
}
return nil
}
// generateAndBroadcastExecutionProofs generates one execution proof per
// configured proof type for the given block, broadcasts each proof over p2p,
// and hands it to the chain service for storage. Proofs are produced
// concurrently; on any failure the aggregate error is logged and the function
// returns without reporting success.
func (s *Service) generateAndBroadcastExecutionProofs(ctx context.Context, roBlock blocks.ROBlock) {
	// Simulated proof generation time for the dummy prover.
	const delay = 2 * time.Second

	proofTypes := flags.Get().ProofGenerationTypes

	// Exit early if proof generation is disabled.
	if len(proofTypes) == 0 {
		return
	}

	var wg errgroup.Group
	for _, proofType := range proofTypes {
		wg.Go(func() error {
			execProof, err := generateExecProof(roBlock, primitives.ExecutionProofId(proofType), delay)
			if err != nil {
				return fmt.Errorf("generate exec proof: %w", err)
			}

			if err := s.cfg.p2p.Broadcast(ctx, execProof); err != nil {
				return fmt.Errorf("broadcast exec proof: %w", err)
			}

			if err := s.cfg.chain.ReceiveProof(execProof); err != nil {
				return errors.Wrap(err, "receive proof")
			}

			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		log.WithError(err).Error("Failed to generate and broadcast execution proofs")
		// At least one proof failed: do not emit the success log below.
		return
	}

	log.WithFields(logrus.Fields{
		"root":  fmt.Sprintf("%#x", roBlock.Root()),
		"slot":  roBlock.Block().Slot(),
		"count": len(proofTypes),
	}).Debug("Generated and broadcasted execution proofs")
}
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {

View File

@@ -0,0 +1,37 @@
package sync
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
// executionProofSubscriber handles execution proofs received via gossip:
// it marks the proof as seen, persists it through the chain service, and
// notifies operation-feed subscribers.
func (s *Service) executionProofSubscriber(_ context.Context, msg proto.Message) error {
	proof, ok := msg.(*ethpb.ExecutionProof)
	if !ok {
		return errors.Errorf("incorrect type of message received, wanted %T but got %T", &ethpb.ExecutionProof{}, msg)
	}

	// Record the proof as seen for this slot/root/id.
	s.setSeenProof(proof.Slot, bytesutil.ToBytes32(proof.BlockRoot), proof.ProofId)

	// Save the proof to storage.
	if err := s.cfg.chain.ReceiveProof(proof); err != nil {
		return errors.Wrap(err, "receive proof")
	}

	// Notify subscribers about the new execution proof.
	event := &feed.Event{
		Type: opfeed.ExecutionProofReceived,
		Data: &opfeed.ExecutionProofReceivedData{
			ExecutionProof: proof,
		},
	}
	s.cfg.operationNotifier.OperationFeed().Send(event)

	return nil
}

View File

@@ -0,0 +1,131 @@
package sync
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/sirupsen/logrus"
)
// validateExecutionProof is the pubsub validation function for the execution
// proof gossip topic. It short-circuits for self-published messages and during
// initial sync, then decodes and sanity-checks the proof before accepting it.
// On success the decoded proof is stored in msg.ValidatorData so the
// subscriber does not have to decode it again.
func (s *Service) validateExecutionProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
	// Always accept our own messages.
	if pid == s.cfg.p2p.PeerID() {
		return pubsub.ValidationAccept, nil
	}
	// Ignore messages during initial sync.
	if s.cfg.initialSync.Syncing() {
		return pubsub.ValidationIgnore, nil
	}
	// Reject messages with a nil topic.
	if msg.Topic == nil {
		return pubsub.ValidationReject, p2p.ErrInvalidTopic
	}
	// Decode the message, reject if it fails.
	m, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Error("Failed to decode message")
		return pubsub.ValidationReject, err
	}
	// Reject messages that are not of the expected type.
	executionProof, ok := m.(*ethpb.ExecutionProof)
	if !ok {
		log.WithField("message", m).Error("Message is not of type *ethpb.ExecutionProof")
		return pubsub.ValidationReject, errWrongMessage
	}
	// 1. Verify the proof is not from a future slot.
	// NOTE(review): the original step numbering jumped from 1 to 3 — confirm
	// that no check (e.g. a retention-window check) was intended in between.
	if err := s.proofNotFromFutureSlot(executionProof); err != nil {
		return pubsub.ValidationReject, err
	}
	// 2. Ignore proofs already recorded in the seen-proof cache: a cached
	// proof has already passed validation.
	blockRoot := bytesutil.ToBytes32(executionProof.BlockRoot)
	if s.hasSeenProof(blockRoot, executionProof.ProofId) {
		return pubsub.ValidationIgnore, nil
	}
	// 3. Verify the proof data does not exceed the configured size limit.
	if uint64(len(executionProof.ProofData)) > params.BeaconConfig().MaxProofDataBytes {
		return pubsub.ValidationReject, fmt.Errorf("execution proof data size %d exceeds maximum allowed %d", len(executionProof.ProofData), params.BeaconConfig().MaxProofDataBytes)
	}
	// 4. Run zkVM proof verification (currently a stub — see verifyExecutionProof).
	if err := s.verifyExecutionProof(executionProof); err != nil {
		return pubsub.ValidationReject, err
	}
	log.WithFields(logrus.Fields{
		"root": fmt.Sprintf("%#x", blockRoot),
		"slot": executionProof.Slot,
		"id":   executionProof.ProofId,
	}).Debug("Accepted execution proof")
	// Validation successful: stash the decoded proof for the subscriber and accept.
	msg.ValidatorData = executionProof
	return pubsub.ValidationAccept, nil
}
// TODO: Do we need encapsulation for all those verification functions?
// proofNotFromFutureSlot returns an error when the execution proof's slot lies
// in the future relative to local wall-clock time (beyond the allowed gossip
// clock disparity); otherwise it returns nil.
func (s *Service) proofNotFromFutureSlot(executionProof *ethpb.ExecutionProof) error {
	currentSlot := s.cfg.clock.CurrentSlot()
	proofSlot := executionProof.Slot
	if proofSlot == currentSlot {
		return nil
	}

	startTime, err := s.cfg.clock.SlotStart(proofSlot)
	if err != nil {
		// TODO: Should we penalize the peer for this?
		return fmt.Errorf("failed to compute start time for proof slot %d: %w", proofSlot, err)
	}

	// Tolerate a small amount of clock skew between peers.
	threshold := startTime.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
	if s.cfg.clock.Now().Before(threshold) {
		return fmt.Errorf("slot %d is too far in the future (current slot: %d)", proofSlot, currentSlot)
	}
	return nil
}
// hasSeenProof reports whether a proof with the given block root and proof id
// is already present in the seen-proof cache.
// NOTE(review): the original comment described data columns (slot, proposer
// index, column index) — it appears to have been copied from the data-column
// cache helpers and did not match this function.
func (s *Service) hasSeenProof(blockRoot [32]byte, proofId primitives.ExecutionProofId) bool {
	key := computeProofCacheKey(blockRoot, proofId)
	_, seen := s.seenProofCache.Get(key)
	return seen
}
// setSeenProof marks the proof with the given block root and proof id as seen.
// The slot is passed to the cache alongside the key — presumably to drive
// slot-based pruning of entries; confirm against the cache implementation.
// NOTE(review): the original comment described data columns and did not match
// this function.
func (s *Service) setSeenProof(slot primitives.Slot, blockRoot [32]byte, proofId primitives.ExecutionProofId) {
	key := computeProofCacheKey(blockRoot, proofId)
	s.seenProofCache.Add(slot, key, true)
}
// verifyExecutionProof performs the actual zkVM verification of the execution
// proof. Currently a stub: every proof is treated as valid.
func (s *Service) verifyExecutionProof(_ *ethpb.ExecutionProof) error {
	// For now, treat all proofs as valid.
	return nil
}
// computeProofCacheKey builds the seen-proof cache key: the 32-byte block root
// followed by the single-byte proof id (33 bytes total).
func computeProofCacheKey(blockRoot [32]byte, proofId primitives.ExecutionProofId) string {
	var key [33]byte
	copy(key[:32], blockRoot[:])
	copy(key[32:], bytesutil.Bytes1(uint64(proofId)))
	return string(key[:])
}

View File

@@ -1,6 +1,8 @@
package flags
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/urfave/cli/v2"
)
@@ -17,9 +19,18 @@ var (
Value: uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
Aliases: []string{"extend-blob-retention-epoch"},
}
ExecutionProofRetentionEpochFlag = &cli.Uint64Flag{
Name: "execution-proof-retention-epochs",
Usage: fmt.Sprintf(
"Override the default execution proof retention period (measured in epochs). The node will exit with an error at startup if the value is less than the default of %d epochs.",
params.BeaconConfig().MinEpochsForExecutionProofRequests,
),
Value: uint64(params.BeaconConfig().MinEpochsForExecutionProofRequests),
}
)
var Flags = []cli.Flag{
BackfillOldestSlot,
BlobRetentionEpochFlag,
ExecutionProofRetentionEpochFlag,
}

View File

@@ -20,6 +20,7 @@ go_library(
"//cmd:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

View File

@@ -356,4 +356,12 @@ var (
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
}
// ZKVM Generation Proof Type
ZkvmGenerationProofTypeFlag = &cli.IntSliceFlag{
Name: "zkvm-generation-proof-types",
Usage: `
Comma-separated list of proof type IDs to generate
(e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth).
Optional - nodes can verify proofs without generating them.`,
}
)

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v7/cmd"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
@@ -27,6 +28,7 @@ type GlobalFlags struct {
DataColumnBatchLimit int
DataColumnBatchLimitBurstFactor int
StateDiffExponents []int
ProofGenerationTypes []primitives.ExecutionProofId
}
var globalConfig *GlobalFlags
@@ -84,6 +86,19 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
}
}
// zkVM Proof Generation Types
proofTypes := make([]primitives.ExecutionProofId, 0, len(ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name)))
for _, t := range ctx.IntSlice(ZkvmGenerationProofTypeFlag.Name) {
proofTypes = append(proofTypes, primitives.ExecutionProofId(t))
}
cfg.ProofGenerationTypes = proofTypes
if features.Get().EnableZkvm {
if err := validateZkvmProofGenerationTypes(cfg.ProofGenerationTypes); err != nil {
return fmt.Errorf("validate Zkvm proof generation types: %w", err)
}
}
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
@@ -135,3 +150,13 @@ func validateStateDiffExponents(exponents []int) error {
}
return nil
}
// validateZkvmProofGenerationTypes checks that every requested proof id falls
// within the supported range [0, EXECUTION_PROOF_TYPE_COUNT).
func validateZkvmProofGenerationTypes(types []primitives.ExecutionProofId) error {
	for _, proofType := range types {
		if proofType < primitives.EXECUTION_PROOF_TYPE_COUNT {
			continue
		}
		return fmt.Errorf("invalid zkvm proof generation type: %d; valid types are between 0 and %d", proofType, primitives.EXECUTION_PROOF_TYPE_COUNT-1)
	}
	return nil
}

View File

@@ -156,6 +156,7 @@ var appFlags = []cli.Flag{
dasFlags.BackfillOldestSlot,
dasFlags.BlobRetentionEpochFlag,
flags.BatchVerifierLimit,
flags.ZkvmGenerationProofTypeFlag,
}
func init() {

View File

@@ -33,6 +33,10 @@ var (
Name: "data-column-path",
Usage: "Location for data column storage. Default location will be a 'data-columns' directory next to the beacon db.",
}
ExecutionProofStoragePathFlag = &cli.PathFlag{
Name: "execution-proof-path",
Usage: "Location for execution proof storage. Default location will be a 'execution-proofs' directory next to the beacon db.",
}
)
// Flags is the list of CLI flags for configuring blob storage.
@@ -40,6 +44,7 @@ var Flags = []cli.Flag{
BlobStoragePathFlag,
BlobStorageLayout,
DataColumnStoragePathFlag,
ExecutionProofStoragePathFlag,
}
func layoutOptions() string {
@@ -72,11 +77,13 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
if err != nil {
return nil, errors.Wrap(err, "detecting blob storage layout")
}
if layout == filesystem.LayoutNameFlat {
log.Warnf("Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
if layout == filesystem.LayoutNameFlat {
log.Warningf(
"Existing '%s' blob storage layout detected. Consider setting the flag --%s=%s for faster startup and more reliable pruning. Setting this flag will automatically migrate your existing blob storage to the newer layout on the next restart.",
filesystem.LayoutNameFlat, BlobStorageLayout.Name, filesystem.LayoutNameByEpoch)
}
blobStorageOptions := node.WithBlobStorageOptions(
filesystem.WithBlobRetentionEpochs(blobRetentionEpoch),
filesystem.WithBasePath(blobPath),
@@ -93,7 +100,17 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
filesystem.WithDataColumnBasePath(dataColumnStoragePath(c)),
)
opts := []node.Option{blobStorageOptions, dataColumnStorageOption}
executionProofRetentionEpoch, err := executionProofRetentionEpoch(c)
if err != nil {
return nil, errors.Wrap(err, "execution proof retention epoch")
}
proofStorageOption := node.WithProofStorageOption(
filesystem.WithProofRetentionEpochs(executionProofRetentionEpoch),
filesystem.WithProofBasePath(executionProofStoragePath(c)),
)
opts := []node.Option{blobStorageOptions, dataColumnStorageOption, proofStorageOption}
return opts, nil
}
@@ -165,6 +182,17 @@ func dataColumnStoragePath(c *cli.Context) string {
return dataColumnsPath
}
// executionProofStoragePath resolves the on-disk location for execution proof
// storage: the --execution-proof-path flag when set, otherwise an
// "execution-proofs" directory inside the node's data directory.
// TODO: Create a generic function for these storage path getters.
func executionProofStoragePath(c *cli.Context) string {
	if p := c.Path(ExecutionProofStoragePathFlag.Name); p != "" {
		return p
	}
	return filepath.Join(c.String(cmd.DataDirFlag.Name), "execution-proofs")
}
var errInvalidBlobRetentionEpochs = errors.New("value is smaller than spec minimum")
// blobRetentionEpoch returns the spec default MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
@@ -205,6 +233,26 @@ func dataColumnRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) {
return customValue, nil
}
// executionProofRetentionEpoch returns the spec default
// MIN_EPOCHS_FOR_EXECUTION_PROOFS_REQUEST, or a user-specified override from
// the --execution-proof-retention-epochs flag. An override smaller than the
// spec default is rejected with an error.
// TODO: Create a generic function for these retention epoch getters.
func executionProofRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) {
	specMinimum := params.BeaconConfig().MinEpochsForExecutionProofRequests
	if !cliCtx.IsSet(das.ExecutionProofRetentionEpochFlag.Name) {
		return specMinimum, nil
	}

	override := primitives.Epoch(cliCtx.Uint64(das.ExecutionProofRetentionEpochFlag.Name))
	// Reject values below the spec minimum, reusing the shared sentinel so
	// callers can detect the too-small case.
	if override < specMinimum {
		return specMinimum, errors.Wrapf(errInvalidBlobRetentionEpochs, "%s=%d, spec=%d", das.ExecutionProofRetentionEpochFlag.Name, override, specMinimum)
	}
	return override, nil
}
func init() {
BlobStorageLayout.Action = validateLayoutFlag
}

View File

@@ -231,6 +231,12 @@ var appHelpFlagGroups = []flagGroup{
flags.SetGCPercent,
},
},
{
Name: "zkvm",
Flags: []cli.Flag{
flags.ZkvmGenerationProofTypeFlag,
},
},
}
func init() {

View File

@@ -52,6 +52,7 @@ type Flags struct {
DisableDutiesV2 bool // DisableDutiesV2 sets validator client to use the get Duties endpoint
EnableWeb bool // EnableWeb enables the webui on the validator client
EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
EnableZkvm bool // EnableZkvm enables zkVM related features.
// Logging related toggles.
DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
@@ -298,6 +299,11 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
}
}
if ctx.IsSet(EnableZkvmFlag.Name) {
logEnabled(EnableZkvmFlag)
cfg.EnableZkvm = true
}
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil

View File

@@ -211,6 +211,17 @@ var (
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
}
// Activate ZKVM execution proof mode
EnableZkvmFlag = &cli.BoolFlag{
Name: "activate-zkvm",
Usage: `
Activates ZKVM execution proof mode. Enables the node to subscribe to the
execution_proof gossip topic, receive and verify execution proofs from peers,
and advertise zkVM support in its ENR for peer discovery.
Use --zkvm-generation-proof-types to specify which proof types this node
should generate (optional - nodes can verify without generating).
`,
}
)
// devModeFlags holds list of flags that are set when development mode is on.
@@ -272,6 +283,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
EnableZkvmFlag,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {

View File

@@ -310,6 +310,11 @@ type BeaconChainConfig struct {
// Blobs Values
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" spec:"true"`
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes uint64 `yaml:"MAX_PROOF_DATA_BYTES" spec:"true"` // MaxProofDataBytes is the maximum number of bytes for execution proof data.
MinProofsRequired uint64 `yaml:"MIN_PROOFS_REQUIRED" spec:"true"` // MinProofsRequired is the minimum number of execution proofs required for a block to be considered valid.
MinEpochsForExecutionProofRequests primitives.Epoch `yaml:"MIN_EPOCHS_FOR_EXECUTION_PROOF_REQUESTS" spec:"true"` // MinEpochsForExecutionProofRequests is the minimum number of epochs the node will keep the execution proofs for.
// Deprecated_MaxBlobsPerBlock defines the max blobs that could exist in a block.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlock int `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"`
@@ -732,6 +737,20 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
}
// WithinExecutionProofPeriod reports whether the given epoch falls inside the
// execution proof retention window: at or after both the Fulu fork epoch and
// the start of the retention window ending at the current epoch. It is used to
// decide whether proofs should be requested or generated for blocks at epoch.
func WithinExecutionProofPeriod(epoch, current primitives.Epoch) bool {
	retention := primitives.Epoch(BeaconConfig().MinEpochsForExecutionProofRequests)
	var retentionStart primitives.Epoch
	// Guard against underflow on young chains where current < retention.
	if current >= retention {
		retentionStart = current - retention
	}
	return epoch >= primitives.MaxEpoch(BeaconConfig().FuluForkEpoch, retentionStart)
}
// EpochsDuration returns the time duration of the given number of epochs.
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
return SlotsDuration(SlotsForEpochs(count, b), b)

View File

@@ -38,6 +38,7 @@ var mainnetNetworkConfig = &NetworkConfig{
AttSubnetKey: "attnets",
SyncCommsSubnetKey: "syncnets",
CustodyGroupCountKey: "cgc",
ZkvmEnabledKey: "zkvm",
MinimumPeersInSubnetSearch: 20,
ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
BootstrapNodes: []string{
@@ -355,6 +356,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxBlobsPerBlock: 21,
},
},
// EIP-8025: Optional Execution Proofs
MaxProofDataBytes: 1_048_576, // 1 MiB
MinProofsRequired: 2,
MinEpochsForExecutionProofRequests: 2,
}
// MainnetTestConfig provides a version of the mainnet config that has a different name

View File

@@ -11,6 +11,7 @@ type NetworkConfig struct {
AttSubnetKey string // AttSubnetKey is the ENR key of the subnet bitfield.
SyncCommsSubnetKey string // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield.
CustodyGroupCountKey string // CustodyGroupsCountKey is the ENR key of the custody group count.
ZkvmEnabledKey string // ZkvmEnabledKey is the ENR key of whether zkVM mode is enabled or not.
MinimumPeersInSubnetSearch uint64 // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search.
// Chain Network Config

View File

@@ -11,6 +11,7 @@ go_library(
"domain.go",
"epoch.go",
"execution_address.go",
"execution_proof_id.go",
"kzg.go",
"payload_id.go",
"slot.go",
@@ -36,6 +37,7 @@ go_test(
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",
"execution_proof_id_test.go",
"slot_test.go",
"sszbytes_test.go",
"sszuint64_test.go",

View File

@@ -0,0 +1,64 @@
package primitives
import (
"fmt"
fssz "github.com/prysmaticlabs/fastssz"
)
var _ fssz.HashRoot = (ExecutionProofId)(0)
var _ fssz.Marshaler = (*ExecutionProofId)(nil)
var _ fssz.Unmarshaler = (*ExecutionProofId)(nil)
// EXECUTION_PROOF_TYPE_COUNT is the number of supported execution proof types.
// Each proof type represents a different zkVM+EL combination.
//
// NOTE(review): Go convention is MixedCaps (ExecutionProofTypeCount); renaming
// would break existing callers, so it is only flagged here.
//
// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
const EXECUTION_PROOF_TYPE_COUNT = 8

// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
// Valid values are in [0, EXECUTION_PROOF_TYPE_COUNT).
type ExecutionProofId uint8
// IsValid reports whether the proof id is within the supported range
// [0, EXECUTION_PROOF_TYPE_COUNT).
//
// A value receiver is used for consistency with the other read-only methods
// (e.g. HashTreeRoot) and so the method is callable on non-addressable values;
// existing callers on *ExecutionProofId continue to work unchanged.
func (id ExecutionProofId) IsValid() bool {
	return uint8(id) < EXECUTION_PROOF_TYPE_COUNT
}
// HashTreeRoot returns the SSZ hash tree root of the proof id.
func (id ExecutionProofId) HashTreeRoot() ([32]byte, error) {
	return fssz.HashWithDefaultHasher(id)
}
// HashTreeRootWith writes the proof id into the supplied hasher as a uint8 leaf.
func (id ExecutionProofId) HashTreeRootWith(hh *fssz.Hasher) error {
	hh.PutUint8(uint8(id))
	return nil
}
// UnmarshalSSZ decodes the proof id from its 1-byte SSZ encoding, returning an
// error when the buffer is not exactly SizeSSZ bytes long.
func (id *ExecutionProofId) UnmarshalSSZ(buf []byte) error {
	if size := id.SizeSSZ(); len(buf) != size {
		return fmt.Errorf("expected buffer of length %d received %d", size, len(buf))
	}
	*id = ExecutionProofId(fssz.UnmarshallUint8(buf))
	return nil
}
// MarshalSSZTo appends the SSZ encoding of the proof id to buf and returns the
// extended slice.
func (id *ExecutionProofId) MarshalSSZTo(buf []byte) ([]byte, error) {
	encoded, err := id.MarshalSSZ()
	if err != nil {
		return nil, err
	}
	return append(buf, encoded...), nil
}
// MarshalSSZ encodes the proof id as a single SSZ byte.
func (id *ExecutionProofId) MarshalSSZ() ([]byte, error) {
	return fssz.MarshalUint8(nil, uint8(*id)), nil
}
// SizeSSZ returns the fixed SSZ-encoded size of the proof id in bytes (always 1).
func (id *ExecutionProofId) SizeSSZ() int {
	return 1
}

View File

@@ -0,0 +1,73 @@
package primitives_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// TestExecutionProofId_IsValid exercises the validity boundary of proof ids:
// ids below EXECUTION_PROOF_TYPE_COUNT (8) are valid, everything else is not.
func TestExecutionProofId_IsValid(t *testing.T) {
	cases := []struct {
		name string
		id   primitives.ExecutionProofId
		want bool
	}{
		{name: "valid proof id 0", id: 0, want: true},
		{name: "valid proof id 1", id: 1, want: true},
		{name: "valid proof id 7 (max valid)", id: 7, want: true},
		{name: "invalid proof id 8 (at limit)", id: 8, want: false},
		{name: "invalid proof id 255", id: 255, want: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.id.IsValid(); got != tc.want {
				t.Errorf("ExecutionProofId.IsValid() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestExecutionProofId_Casting verifies that ExecutionProofId round-trips
// cleanly through conversions to and from uint8 and int.
func TestExecutionProofId_Casting(t *testing.T) {
	id := primitives.ExecutionProofId(5)

	t.Run("uint8", func(t *testing.T) {
		if got := uint8(id); got != 5 {
			t.Errorf("Casting to uint8 failed: got %v, want 5", got)
		}
	})

	t.Run("from uint8", func(t *testing.T) {
		var x uint8 = 7
		if got := primitives.ExecutionProofId(x); got != 7 {
			t.Errorf("Casting from uint8 failed: got %v, want 7", got)
		}
	})

	t.Run("int", func(t *testing.T) {
		x := 3
		if got := primitives.ExecutionProofId(x); got != 3 {
			t.Errorf("Casting from int failed: got %v, want 3", got)
		}
	})
}

72
kurtosis/README.md Normal file
View File

@@ -0,0 +1,72 @@
# Kurtosis scripts for EIP-8025
## How to run
I slightly modified [Manu's tip](https://hackmd.io/8z4thpsyQJioaU6jj0Wazw) by adding the following to my `~/.zshrc`.
```zsh
# Kurtosis Aliases
blog() {
docker logs -f "$(docker ps | grep cl-"$1"-prysm-geth | awk '{print $NF}')" 2>&1
}
vlog() {
docker logs -f "$(docker ps | grep vc-"$1"-geth-prysm | awk '{print $NF}')" 2>&1
}
dora() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/dora/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
graf() {
open http://localhost:$(docker ps --format '{{.Ports}} {{.Names}}' | awk '/grafana/ {split($1, a, "->"); split(a[1], b, ":"); print b[2]}')
}
devnet () {
local args_file_path="./kurtosis/default.yaml"
if [ ! -z "$1" ]; then
args_file_path="$1"
echo "Using custom args-file path: $args_file_path"
else
echo "Using default args-file path: $args_file_path"
fi
kurtosis clean -a &&
bazel build //cmd/beacon-chain:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/beacon-chain/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/beacon-chain prysm-bn-custom-image &&
bazel build //cmd/validator:oci_image_tarball --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo --config=release &&
docker load -i bazel-bin/cmd/validator/oci_image_tarball/tarball.tar &&
docker tag gcr.io/offchainlabs/prysm/validator prysm-vc-custom-image &&
kurtosis run github.com/ethpandaops/ethereum-package --args-file="$args_file_path" --verbosity brief &&
dora
}
stop() {
kurtosis clean -a
}
dps() {
docker ps --format "table {{.ID}}\\t{{.Image}}\\t{{.Status}}\\t{{.Names}}" -a
}
```
At the project directory, you can simply spin up a devnet with:
```bash
$ devnet
```
Or you can specify the network parameter YAML file like:
```bash
$ devnet ./kurtosis/proof_verify.yaml
```
### Running scripts with local images
Prysm images are loaded automatically by the `devnet` command, but if you want to run a script with `lighthouse`:
#### `./kurtosis/interop.yaml`
- `lighthouse:local`: Please build your own image following [Lighthouse's guide](https://lighthouse-book.sigmaprime.io/installation_docker.html?highlight=docker#building-the-docker-image) on [`kevaundray/kw/sel-alternative`](https://github.com/kevaundray/lighthouse/tree/kw/sel-alternative/) branch.

16
kurtosis/default.yaml Normal file
View File

@@ -0,0 +1,16 @@
participants:
- el_type: geth
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 4
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

38
kurtosis/interop.yaml Normal file
View File

@@ -0,0 +1,38 @@
# 3 nodes (2 from Prysm, 1 from Lighthouse) generate proofs and
# 1 node only verifies
participants:
# Prysm: Proof generating nodes (nodes 1-2)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 2
# Lighthouse: Proof generating nodes (node 3)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: lighthouse
cl_image: lighthouse:local
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
- --target-peers=3
count: 1
# Prysm: Proof verifying only node (node 4)
- el_type: dummy
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
vc_image: prysm-vc-custom-image
count: 1
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

View File

@@ -0,0 +1,27 @@
# 3 nodes generate proofs, 1 node only verifies
participants:
# Proof generating nodes (nodes 1-3)
- el_type: geth
el_image: ethereum/client-go:latest
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
- --zkvm-generation-proof-types=0,1
vc_image: prysm-vc-custom-image
count: 3
# Proof verifying only node (node 4)
- el_type: dummy
cl_type: prysm
cl_image: prysm-bn-custom-image
cl_extra_params:
- --activate-zkvm
vc_image: prysm-vc-custom-image
count: 1
network_params:
seconds_per_slot: 2
global_log_level: debug
snooper_enabled: false
additional_services:
- dora
- prometheus_grafana

View File

@@ -371,6 +371,11 @@ go_library(
"beacon_block.go",
"cloners.go",
"eip_7521.go",
"execution_proof.go",
# NOTE: ExecutionProof includes an alias type of uint8,
# which is not supported by fastssz sszgen.
# Temporarily managed manually.
"execution_proof.ssz.go",
"gloas.go",
"log.go",
"sync_committee_mainnet.go",
@@ -427,6 +432,7 @@ ssz_proto_files(
"beacon_state.proto",
"blobs.proto",
"data_columns.proto",
"execution_proof.proto",
"gloas.proto",
"light_client.proto",
"sync_committee.proto",

View File

@@ -0,0 +1,18 @@
package eth
import "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
// Copy returns a deep copy of the execution proof (byte slices are duplicated),
// or nil when the receiver is nil.
func (e *ExecutionProof) Copy() *ExecutionProof {
	if e == nil {
		return nil
	}
	c := &ExecutionProof{
		ProofId: e.ProofId,
		Slot:    e.Slot,
	}
	c.BlockHash = bytesutil.SafeCopyBytes(e.BlockHash)
	c.BlockRoot = bytesutil.SafeCopyBytes(e.BlockRoot)
	c.ProofData = bytesutil.SafeCopyBytes(e.ProofData)
	return c
}

268
proto/prysm/v1alpha1/execution_proof.pb.go generated Executable file
View File

@@ -0,0 +1,268 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/prysm/v1alpha1/execution_proof.proto
package eth
import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ExecutionProof struct {
state protoimpl.MessageState `protogen:"open.v1"`
ProofId github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,1,opt,name=proof_id,json=proofId,proto3" json:"proof_id,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"`
Slot github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
BlockHash []byte `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty" ssz-size:"32"`
BlockRoot []byte `protobuf:"bytes,4,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
ProofData []byte `protobuf:"bytes,5,opt,name=proof_data,json=proofData,proto3" json:"proof_data,omitempty" ssz-max:"1048576"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionProof) Reset() {
*x = ExecutionProof{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExecutionProof) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionProof) ProtoMessage() {}
func (x *ExecutionProof) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExecutionProof.ProtoReflect.Descriptor instead.
func (*ExecutionProof) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{0}
}
func (x *ExecutionProof) GetProofId() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
if x != nil {
return x.ProofId
}
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(0)
}
func (x *ExecutionProof) GetSlot() github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot {
if x != nil {
return x.Slot
}
return github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(0)
}
func (x *ExecutionProof) GetBlockHash() []byte {
if x != nil {
return x.BlockHash
}
return nil
}
func (x *ExecutionProof) GetBlockRoot() []byte {
if x != nil {
return x.BlockRoot
}
return nil
}
func (x *ExecutionProof) GetProofData() []byte {
if x != nil {
return x.ProofData
}
return nil
}
type ExecutionProofsByRootRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"`
CountNeeded uint64 `protobuf:"varint,2,opt,name=count_needed,json=countNeeded,proto3" json:"count_needed,omitempty"`
AlreadyHave []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId `protobuf:"varint,3,rep,packed,name=already_have,json=alreadyHave,proto3" json:"already_have,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId" ssz-max:"8"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionProofsByRootRequest) Reset() {
*x = ExecutionProofsByRootRequest{}
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExecutionProofsByRootRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionProofsByRootRequest) ProtoMessage() {}
func (x *ExecutionProofsByRootRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExecutionProofsByRootRequest.ProtoReflect.Descriptor instead.
func (*ExecutionProofsByRootRequest) Descriptor() ([]byte, []int) {
return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP(), []int{1}
}
func (x *ExecutionProofsByRootRequest) GetBlockRoot() []byte {
if x != nil {
return x.BlockRoot
}
return nil
}
func (x *ExecutionProofsByRootRequest) GetCountNeeded() uint64 {
if x != nil {
return x.CountNeeded
}
return 0
}
func (x *ExecutionProofsByRootRequest) GetAlreadyHave() []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId {
if x != nil {
return x.AlreadyHave
}
return []github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(nil)
}
// File_proto_prysm_v1alpha1_execution_proof_proto is the compiled descriptor
// for proto/prysm/v1alpha1/execution_proof.proto, populated by the init below.
var File_proto_prysm_v1alpha1_execution_proof_proto protoreflect.FileDescriptor

// file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc is the wire-format
// encoding of the .proto file descriptor (auto-generated; do not edit by hand).
var file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = []byte{
	0x0a, 0x2a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31,
	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
	0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74,
	0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
	0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65,
	0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
	0x6f, 0x6f, 0x66, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x69, 0x64, 0x18,
	0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x50, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
	0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
	0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
	0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
	0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
	0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64,
	0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x42, 0x44,
	0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f,
	0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73,
	0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74,
	0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e,
	0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c,
	0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
	0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73,
	0x68, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
	0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62,
	0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6f,
	0x66, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0x92, 0xb5,
	0x18, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6f, 0x66,
	0x44, 0x61, 0x74, 0x61, 0x22, 0xe2, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
	0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x73, 0x42, 0x79, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72,
	0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
	0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, 0x0a, 0x0c,
	0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
	0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12,
	0x78, 0x0a, 0x0c, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x68, 0x61, 0x76, 0x65, 0x18,
	0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x55, 0x82, 0xb5, 0x18, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75,
	0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61,
	0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x6f, 0x6e, 0x73,
	0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d,
	0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e,
	0x50, 0x72, 0x6f, 0x6f, 0x66, 0x49, 0x64, 0x92, 0xb5, 0x18, 0x01, 0x38, 0x52, 0x0b, 0x61, 0x6c,
	0x72, 0x65, 0x61, 0x64, 0x79, 0x48, 0x61, 0x76, 0x65, 0x42, 0x9d, 0x01, 0x0a, 0x19, 0x6f, 0x72,
	0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76,
	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
	0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39,
	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
	0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x37,
	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61,
	0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65,
	0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
	0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68,
	0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}
// Guard and storage for lazily gzip-compressing the raw descriptor exactly once.
var (
	file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce sync.Once
	file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc
)
// file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP returns the
// gzip-compressed raw descriptor, compressing it on first call (via sync.Once)
// and caching the result in rawDescData.
func file_proto_prysm_v1alpha1_execution_proof_proto_rawDescGZIP() []byte {
	file_proto_prysm_v1alpha1_execution_proof_proto_rawDescOnce.Do(func() {
		file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData)
	})
	return file_proto_prysm_v1alpha1_execution_proof_proto_rawDescData
}
// Message info slots and Go type table for the two messages in this file.
var file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = []any{
	(*ExecutionProof)(nil),               // 0: ethereum.eth.v1alpha1.ExecutionProof
	(*ExecutionProofsByRootRequest)(nil), // 1: ethereum.eth.v1alpha1.ExecutionProofsByRootRequest
}
// Dependency index table; all sub-lists are empty because this file declares
// no services, extensions, or cross-message field type references.
var file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}
func init() { file_proto_prysm_v1alpha1_execution_proof_proto_init() }
// file_proto_prysm_v1alpha1_execution_proof_proto_init builds this file's type
// descriptors; it is idempotent and returns immediately once built.
func file_proto_prysm_v1alpha1_execution_proof_proto_init() {
	if File_proto_prysm_v1alpha1_execution_proof_proto != nil {
		return
	}
	// Anonymous type used only to derive this package's import path via reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_proto_prysm_v1alpha1_execution_proof_proto_goTypes,
		DependencyIndexes: file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs,
		MessageInfos:      file_proto_prysm_v1alpha1_execution_proof_proto_msgTypes,
	}.Build()
	File_proto_prysm_v1alpha1_execution_proof_proto = out.File
	// Drop the bookkeeping tables so they can be garbage collected.
	file_proto_prysm_v1alpha1_execution_proof_proto_rawDesc = nil
	file_proto_prysm_v1alpha1_execution_proof_proto_goTypes = nil
	file_proto_prysm_v1alpha1_execution_proof_proto_depIdxs = nil
}

View File

@@ -0,0 +1,52 @@
syntax = "proto3";
package ethereum.eth.v1alpha1;
import "proto/eth/ext/options.proto";
option csharp_namespace = "Ethereum.Eth.v1alpha1";
option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
option java_multiple_files = true;
option java_outer_classname = "ExecutionProofProto";
option java_package = "org.ethereum.eth.v1alpha1";
option php_namespace = "Ethereum\\Eth\\v1alpha1";
// ExecutionProof is a zkVM-generated validity proof for a single execution
// payload, gossiped between consensus nodes.
message ExecutionProof {
  // Which proof type (zkVM+EL combination) this proof belongs to.
  // Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc.
  // NOTE(review): declared uint64 on the wire, but cast to the uint8-based
  // ExecutionProofId primitive in Go.
  uint64 proof_id = 1 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
  ];
  // The slot of the beacon block this proof validates.
  uint64 slot = 2 [
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
  ];
  // The block hash of the execution payload this proof validates (32 bytes).
  bytes block_hash = 3 [ (ethereum.eth.ext.ssz_size) = "32" ];
  // The beacon block root corresponding to the beacon block
  // with the execution payload, that this proof attests to (32 bytes).
  bytes block_root = 4 [ (ethereum.eth.ext.ssz_size) = "32" ];
  // The actual proof data, capped at 1 MiB by the SSZ schema.
  bytes proof_data = 5 [ (ethereum.eth.ext.ssz_max) = "1048576" ];
}
// ExecutionProofsByRootRequest asks a peer for execution proofs covering one
// beacon block root, excluding proof IDs the requester already holds.
message ExecutionProofsByRootRequest {
  // The block root we need proofs for (32 bytes).
  bytes block_root = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
  // The number of proofs needed.
  uint64 count_needed = 2;
  // We already have these proof IDs, so don't send them again.
  // Limited to 8 entries by the SSZ schema.
  repeated uint64 already_have = 3 [
    (ethereum.eth.ext.ssz_max) = "8",
    (ethereum.eth.ext.cast_type) =
        "github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ExecutionProofId"
  ];
}

View File

@@ -0,0 +1,300 @@
// NOTE: This file is auto-generated by sszgen, but modified manually
// to handle the alias type ExecutionProofId which is based on uint8.
package eth
import (
github_com_OffchainLabs_prysm_v7_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ssz "github.com/prysmaticlabs/fastssz"
)
// MarshalSSZ ssz marshals the ExecutionProof object into a freshly allocated
// buffer via the generic fastssz helper (which calls MarshalSSZTo).
func (e *ExecutionProof) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProof object to a target array.
// Layout: fixed part first (proof_id, slot, block_hash, block_root, plus the
// 4-byte offset of proof_data), then the variable-length proof_data tail.
func (e *ExecutionProof) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	// Fixed-part size: 1 (proof_id) + 8 (slot) + 32 + 32 + 4 (offset) = 77.
	offset := int(77)
	// Field (0) 'ProofId'
	// Serialized as one byte: ExecutionProofId is uint8-based (see file header).
	dst = ssz.MarshalUint8(dst, uint8(e.ProofId))
	// Field (1) 'Slot'
	dst = ssz.MarshalUint64(dst, uint64(e.Slot))
	// Field (2) 'BlockHash' — fixed 32 bytes, length enforced.
	if size := len(e.BlockHash); size != 32 {
		err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
		return
	}
	dst = append(dst, e.BlockHash...)
	// Field (3) 'BlockRoot' — fixed 32 bytes, length enforced.
	if size := len(e.BlockRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
		return
	}
	dst = append(dst, e.BlockRoot...)
	// Offset (4) 'ProofData' — points just past the fixed part.
	dst = ssz.WriteOffset(dst, offset)
	offset += len(e.ProofData)
	// Field (4) 'ProofData' — variable length, capped at 1 MiB.
	if size := len(e.ProofData); size > 1048576 {
		err = ssz.ErrBytesLengthFn("--.ProofData", size, 1048576)
		return
	}
	dst = append(dst, e.ProofData...)
	return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProof object from buf.
// Fixed layout: proof_id buf[0:1], slot buf[1:9], block_hash buf[9:41],
// block_root buf[41:73], proof_data offset buf[73:77], then the data tail.
//
// NOTE(review): byte fields are appended to any existing backing slice
// (standard sszgen pattern) — assumes a fresh/zeroed receiver; unmarshalling
// into a reused non-empty object would grow the slices. Confirm callers
// always pass a new object.
func (e *ExecutionProof) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size < 77 {
		return ssz.ErrSize
	}
	tail := buf
	var o4 uint64
	// Field (0) 'ProofId' — single byte (ExecutionProofId is uint8-based).
	e.ProofId = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[0:1]))
	// Field (1) 'Slot'
	e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[1:9]))
	// Field (2) 'BlockHash'
	if cap(e.BlockHash) == 0 {
		e.BlockHash = make([]byte, 0, len(buf[9:41]))
	}
	e.BlockHash = append(e.BlockHash, buf[9:41]...)
	// Field (3) 'BlockRoot'
	if cap(e.BlockRoot) == 0 {
		e.BlockRoot = make([]byte, 0, len(buf[41:73]))
	}
	e.BlockRoot = append(e.BlockRoot, buf[41:73]...)
	// Offset (4) 'ProofData' — must not exceed the buffer and must point
	// exactly at the end of the fixed part.
	if o4 = ssz.ReadOffset(buf[73:77]); o4 > size {
		return ssz.ErrOffset
	}
	if o4 != 77 {
		return ssz.ErrInvalidVariableOffset
	}
	// Field (4) 'ProofData' — remainder of the buffer, capped at 1 MiB.
	{
		buf = tail[o4:]
		if len(buf) > 1048576 {
			return ssz.ErrBytesLength
		}
		if cap(e.ProofData) == 0 {
			e.ProofData = make([]byte, 0, len(buf))
		}
		e.ProofData = append(e.ProofData, buf...)
	}
	return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProof object.
func (e *ExecutionProof) SizeSSZ() int {
	// 77 fixed bytes (proof_id 1 + slot 8 + block_hash 32 + block_root 32 +
	// proof_data offset 4) plus the variable-length proof data.
	return 77 + len(e.ProofData)
}
// HashTreeRoot ssz hashes the ExecutionProof object using the library's
// default hasher pool (delegates to HashTreeRootWith).
func (e *ExecutionProof) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProof object with a hasher.
// Each field contributes one chunk; proof_data is merkleized as a byte list
// with its length mixed in.
func (e *ExecutionProof) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()
	// Field (0) 'ProofId' — single byte (ExecutionProofId is uint8-based).
	hh.PutUint8(uint8(e.ProofId))
	// Field (1) 'Slot'
	hh.PutUint64(uint64(e.Slot))
	// Field (2) 'BlockHash' — fixed 32 bytes.
	if size := len(e.BlockHash); size != 32 {
		err = ssz.ErrBytesLengthFn("--.BlockHash", size, 32)
		return
	}
	hh.PutBytes(e.BlockHash)
	// Field (3) 'BlockRoot' — fixed 32 bytes.
	if size := len(e.BlockRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
		return
	}
	hh.PutBytes(e.BlockRoot)
	// Field (4) 'ProofData' — byte list, limit 1048576 bytes
	// => (1048576+31)/32 = 32768 chunk limit for the mixin.
	{
		elemIndx := hh.Index()
		byteLen := uint64(len(e.ProofData))
		if byteLen > 1048576 {
			err = ssz.ErrIncorrectListSize
			return
		}
		hh.PutBytes(e.ProofData)
		hh.MerkleizeWithMixin(elemIndx, byteLen, (1048576+31)/32)
	}
	hh.Merkleize(indx)
	return
}
// MarshalSSZ ssz marshals the ExecutionProofsByRootRequest object into a
// freshly allocated buffer via the generic fastssz helper.
func (e *ExecutionProofsByRootRequest) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(e)
}
// MarshalSSZTo ssz marshals the ExecutionProofsByRootRequest object to a
// target array. Fixed part (block_root, count_needed, already_have offset)
// first, then the variable-length already_have list, one byte per entry.
func (e *ExecutionProofsByRootRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf
	// Fixed-part size: 32 (block_root) + 8 (count_needed) + 4 (offset) = 44.
	offset := int(44)
	// Field (0) 'BlockRoot' — fixed 32 bytes, length enforced.
	if size := len(e.BlockRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
		return
	}
	dst = append(dst, e.BlockRoot...)
	// Field (1) 'CountNeeded'
	dst = ssz.MarshalUint64(dst, e.CountNeeded)
	// Offset (2) 'AlreadyHave' — points just past the fixed part.
	dst = ssz.WriteOffset(dst, offset)
	offset += len(e.AlreadyHave) * 1
	// Field (2) 'AlreadyHave' — at most 8 entries, one byte each
	// (ExecutionProofId is uint8-based).
	if size := len(e.AlreadyHave); size > 8 {
		err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
		return
	}
	for ii := 0; ii < len(e.AlreadyHave); ii++ {
		dst = ssz.MarshalUint8(dst, uint8(e.AlreadyHave[ii]))
	}
	return
}
// UnmarshalSSZ ssz unmarshals the ExecutionProofsByRootRequest object from buf.
// Fixed layout: block_root buf[0:32], count_needed buf[32:40], already_have
// offset buf[40:44]; the tail holds one byte per already_have entry (max 8).
//
// The previous hand-modified version copied the receiver's pre-existing
// AlreadyHave values into a scratch []uint8, extended it with
// ssz.ExtendUint8, converted it back — and then overwrote every element in
// the decode loop anyway. The slice is now allocated directly and filled in
// one pass, which is behavior-identical and drops the dead work. The
// always-nil outer `err` that was returned at the end is gone as well.
func (e *ExecutionProofsByRootRequest) UnmarshalSSZ(buf []byte) error {
	size := uint64(len(buf))
	if size < 44 {
		return ssz.ErrSize
	}
	tail := buf
	var o2 uint64
	// Field (0) 'BlockRoot'
	// NOTE(review): appends to any existing backing slice (standard sszgen
	// pattern) — assumes a fresh receiver.
	if cap(e.BlockRoot) == 0 {
		e.BlockRoot = make([]byte, 0, len(buf[0:32]))
	}
	e.BlockRoot = append(e.BlockRoot, buf[0:32]...)
	// Field (1) 'CountNeeded'
	e.CountNeeded = ssz.UnmarshallUint64(buf[32:40])
	// Offset (2) 'AlreadyHave' — must not exceed the buffer and must point
	// exactly at the end of the fixed part.
	if o2 = ssz.ReadOffset(buf[40:44]); o2 > size {
		return ssz.ErrOffset
	}
	if o2 != 44 {
		return ssz.ErrInvalidVariableOffset
	}
	// Field (2) 'AlreadyHave' — one byte per element, at most 8 elements.
	// `primitives.ExecutionProofId` is an alias of `uint8`, so each byte is
	// decoded and converted directly instead of via sszgen's []uint8 helpers.
	{
		buf = tail[o2:]
		num, err := ssz.DivideInt2(len(buf), 1, 8)
		if err != nil {
			return err
		}
		e.AlreadyHave = make([]github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId, num)
		for ii := 0; ii < num; ii++ {
			e.AlreadyHave[ii] = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ExecutionProofId(ssz.UnmarshallUint8(buf[ii : ii+1]))
		}
	}
	return nil
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionProofsByRootRequest object.
func (e *ExecutionProofsByRootRequest) SizeSSZ() int {
	// 44 fixed bytes (block_root 32 + count_needed 8 + already_have offset 4)
	// plus one byte per already_have entry.
	return 44 + len(e.AlreadyHave)
}
// HashTreeRoot ssz hashes the ExecutionProofsByRootRequest object using the
// library's default hasher pool (delegates to HashTreeRootWith).
func (e *ExecutionProofsByRootRequest) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(e)
}
// HashTreeRootWith ssz hashes the ExecutionProofsByRootRequest object with a
// hasher. already_have is merkleized as a list of uint8 with its element
// count mixed in.
func (e *ExecutionProofsByRootRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()
	// Field (0) 'BlockRoot' — fixed 32 bytes.
	if size := len(e.BlockRoot); size != 32 {
		err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32)
		return
	}
	hh.PutBytes(e.BlockRoot)
	// Field (1) 'CountNeeded'
	hh.PutUint64(e.CountNeeded)
	// Field (2) 'AlreadyHave' — list of single-byte elements, limit 8.
	{
		if size := len(e.AlreadyHave); size > 8 {
			err = ssz.ErrListTooBigFn("--.AlreadyHave", size, 8)
			return
		}
		subIndx := hh.Index()
		for _, i := range e.AlreadyHave {
			hh.AppendUint8(uint8(i))
		}
		hh.FillUpTo32()
		numItems := uint64(len(e.AlreadyHave))
		// Chunk limit derived from the 8-element, 1-byte-per-element bound.
		hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(8, numItems, 1))
	}
	hh.Merkleize(indx)
	return
}